From c95e873564efbfb6fadb9942b2bcdda7d90ec56f Mon Sep 17 00:00:00 2001 From: will-2012 Date: Fri, 9 Jun 2023 10:23:08 +0800 Subject: [PATCH 01/78] perf: add more metrics to perf --- base/gnfd/gnfd_service.go | 32 ++++++++++++++++++++--- modular/approver/approve_task.go | 45 ++++++++++++++++++++++++++++++-- modular/executor/execute_task.go | 2 +- modular/gater/admin_handler.go | 10 +++++++ pkg/metrics/metric_items.go | 24 +++++++++++++---- 5 files changed, 101 insertions(+), 12 deletions(-) diff --git a/base/gnfd/gnfd_service.go b/base/gnfd/gnfd_service.go index 91c2c00cc..e57f14301 100644 --- a/base/gnfd/gnfd_service.go +++ b/base/gnfd/gnfd_service.go @@ -6,6 +6,7 @@ import ( "strconv" "time" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" "github.com/cosmos/cosmos-sdk/types/query" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" @@ -19,6 +20,8 @@ import ( // CurrentHeight the block height sub one as the stable height. func (g *Gnfd) CurrentHeight(ctx context.Context) (uint64, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("query_height").Observe(time.Since(startTime).Seconds()) resp, err := g.getCurrentWsClient().ABCIInfo(ctx) if err != nil { log.CtxErrorw(ctx, "get latest block height failed", "node_addr", @@ -30,6 +33,8 @@ func (g *Gnfd) CurrentHeight(ctx context.Context) (uint64, error) { // HasAccount returns an indication of the existence of address. func (g *Gnfd) HasAccount(ctx context.Context, address string) (bool, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("query_account").Observe(time.Since(startTime).Seconds()) client := g.getCurrentClient().GnfdClient() resp, err := client.Account(ctx, &authtypes.QueryAccountRequest{Address: address}) if err != nil { @@ -41,6 +46,8 @@ func (g *Gnfd) HasAccount(ctx context.Context, address string) (bool, error) { // ListSPs returns the list of storage provider info. func (g *Gnfd) ListSPs(ctx context.Context) ([]*sptypes.StorageProvider, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("list_sps").Observe(time.Since(startTime).Seconds()) client := g.getCurrentClient().GnfdClient() var spInfos []*sptypes.StorageProvider resp, err := client.StorageProviders(ctx, &sptypes.QueryStorageProvidersRequest{ @@ -61,6 +68,8 @@ func (g *Gnfd) ListSPs(ctx context.Context) ([]*sptypes.StorageProvider, error) // ListBondedValidators returns the list of bonded validators. 
func (g *Gnfd) ListBondedValidators(ctx context.Context) ([]stakingtypes.Validator, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("list_bonded_validators").Observe(time.Since(startTime).Seconds()) client := g.getCurrentClient().GnfdClient() var validators []stakingtypes.Validator resp, err := client.Validators(ctx, &stakingtypes.QueryValidatorsRequest{Status: "BOND_STATUS_BONDED"}) @@ -76,6 +85,8 @@ func (g *Gnfd) ListBondedValidators(ctx context.Context) ([]stakingtypes.Validat // QueryStorageParams returns storage params func (g *Gnfd) QueryStorageParams(ctx context.Context) (params *storagetypes.Params, err error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("query_storage_params").Observe(time.Since(startTime).Seconds()) client := g.getCurrentClient().GnfdClient() resp, err := client.StorageQueryClient.Params(ctx, &storagetypes.QueryParamsRequest{}) if err != nil { @@ -86,10 +97,9 @@ func (g *Gnfd) QueryStorageParams(ctx context.Context) (params *storagetypes.Par } // QueryStorageParamsByTimestamp returns storage params by block create time. -func (g *Gnfd) QueryStorageParamsByTimestamp( - ctx context.Context, - timestamp int64) ( - params *storagetypes.Params, err error) { +func (g *Gnfd) QueryStorageParamsByTimestamp(ctx context.Context, timestamp int64) (params *storagetypes.Params, err error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("query_storage_params_by_timestamp").Observe(time.Since(startTime).Seconds()) client := g.getCurrentClient().GnfdClient() resp, err := client.StorageQueryClient.QueryParamsByTimestamp(ctx, &storagetypes.QueryParamsByTimestampRequest{Timestamp: timestamp}) @@ -102,6 +112,8 @@ func (g *Gnfd) QueryStorageParamsByTimestamp( // QueryBucketInfo returns the bucket info by name. func (g *Gnfd) QueryBucketInfo(ctx context.Context, bucket string) (*storagetypes.BucketInfo, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("query_bucket").Observe(time.Since(startTime).Seconds()) client := g.getCurrentClient().GnfdClient() resp, err := client.HeadBucket(ctx, &storagetypes.QueryHeadBucketRequest{BucketName: bucket}) if err != nil { @@ -113,6 +125,8 @@ func (g *Gnfd) QueryBucketInfo(ctx context.Context, bucket string) (*storagetype // QueryObjectInfo returns the object info by name. func (g *Gnfd) QueryObjectInfo(ctx context.Context, bucket, object string) (*storagetypes.ObjectInfo, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("query_object").Observe(time.Since(startTime).Seconds()) client := g.getCurrentClient().GnfdClient() resp, err := client.HeadObject(ctx, &storagetypes.QueryHeadObjectRequest{ BucketName: bucket, @@ -127,6 +141,8 @@ func (g *Gnfd) QueryObjectInfo(ctx context.Context, bucket, object string) (*sto // QueryObjectInfoByID returns the object info by name. func (g *Gnfd) QueryObjectInfoByID(ctx context.Context, objectID string) (*storagetypes.ObjectInfo, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("query_object_by_id").Observe(time.Since(startTime).Seconds()) client := g.getCurrentClient().GnfdClient() resp, err := client.HeadObjectById(ctx, &storagetypes.QueryHeadObjectByIdRequest{ ObjectId: objectID, @@ -155,6 +171,8 @@ func (g *Gnfd) QueryBucketInfoAndObjectInfo(ctx context.Context, bucket, object // ListenObjectSeal returns an indication of the object is sealed. 
// TODO:: retrieve service support seal event subscription func (g *Gnfd) ListenObjectSeal(ctx context.Context, objectID uint64, timeoutHeight int) (bool, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("wait_object_seal").Observe(time.Since(startTime).Seconds()) var ( objectInfo *storagetypes.ObjectInfo err error @@ -180,6 +198,8 @@ func (g *Gnfd) ListenObjectSeal(ctx context.Context, objectID uint64, timeoutHei // QueryPaymentStreamRecord returns the steam record info by account. func (g *Gnfd) QueryPaymentStreamRecord(ctx context.Context, account string) (*paymenttypes.StreamRecord, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("query_payment_stream_record").Observe(time.Since(startTime).Seconds()) client := g.getCurrentClient().GnfdClient() resp, err := client.StreamRecord(ctx, &paymenttypes.QueryGetStreamRecordRequest{ Account: account, @@ -193,6 +213,8 @@ func (g *Gnfd) QueryPaymentStreamRecord(ctx context.Context, account string) (*p // VerifyGetObjectPermission verifies get object permission. func (g *Gnfd) VerifyGetObjectPermission(ctx context.Context, account, bucket, object string) (bool, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("verify_get_object_permission").Observe(time.Since(startTime).Seconds()) client := g.getCurrentClient().GnfdClient() resp, err := client.VerifyPermission(ctx, &storagetypes.QueryVerifyPermissionRequest{ Operator: account, @@ -212,6 +234,8 @@ func (g *Gnfd) VerifyGetObjectPermission(ctx context.Context, account, bucket, o // VerifyPutObjectPermission verifies put object permission. func (g *Gnfd) VerifyPutObjectPermission(ctx context.Context, account, bucket, object string) (bool, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("verify_put_object_permission").Observe(time.Since(startTime).Seconds()) _ = object client := g.getCurrentClient().GnfdClient() resp, err := client.VerifyPermission(ctx, &storagetypes.QueryVerifyPermissionRequest{ diff --git a/modular/approver/approve_task.go b/modular/approver/approve_task.go index c0967e3cd..9df5f1de1 100644 --- a/modular/approver/approve_task.go +++ b/modular/approver/approve_task.go @@ -3,12 +3,14 @@ package approver import ( "context" "net/http" + "time" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" "github.com/bnb-chain/greenfield-storage-provider/core/module" coretask "github.com/bnb-chain/greenfield-storage-provider/core/task" "github.com/bnb-chain/greenfield-storage-provider/core/taskqueue" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" ) var ( @@ -29,7 +31,7 @@ func (a *ApprovalModular) HandleCreateBucketApprovalTask(ctx context.Context, ta currentHeight uint64 ) if task == nil || task.GetCreateBucketInfo() == nil { - log.CtxErrorw(ctx, "failed to pre create bucket approval, pointer nil") + log.CtxErrorw(ctx, "failed to pre create bucket approval due to pointer nil") return false, ErrDanglingPointer } defer func() { @@ -38,18 +40,29 @@ func (a *ApprovalModular) HandleCreateBucketApprovalTask(ctx context.Context, ta } log.CtxDebugw(ctx, task.Info()) }() + startQueryQueue := time.Now() if a.bucketQueue.Has(task.Key()) { shadowTask := a.bucketQueue.PopByKey(task.Key()) task.SetCreateBucketInfo(shadowTask.(coretask.ApprovalCreateBucketTask).GetCreateBucketInfo()) _ = a.bucketQueue.Push(shadowTask) + 
metrics.GnfdChainHistogram.WithLabelValues("check_repeated_in_create_bucket_approval"). + Observe(time.Since(startQueryQueue).Seconds()) log.CtxErrorw(ctx, "repeated create bucket approval task is returned") return true, nil } + metrics.GnfdChainHistogram.WithLabelValues("check_repeated_in_create_bucket_approval"). + Observe(time.Since(startQueryQueue).Seconds()) + + startQueryMetadata := time.Now() buckets, err := a.baseApp.GfSpClient().GetUserBucketsCount(ctx, task.GetCreateBucketInfo().GetCreator(), false) if err != nil { + metrics.GnfdChainHistogram.WithLabelValues("check_counter_in_create_bucket_approval"). + Observe(time.Since(startQueryMetadata).Seconds()) log.CtxErrorw(ctx, "failed to get account owns max bucket number", "error", err) return false, err } + metrics.GnfdChainHistogram.WithLabelValues("check_counter_in_create_bucket_approval"). + Observe(time.Since(startQueryMetadata).Seconds()) if buckets >= a.accountBucketNumber { log.CtxErrorw(ctx, "account owns bucket number exceed") err = ErrExceedBucketNumber @@ -57,22 +70,33 @@ func (a *ApprovalModular) HandleCreateBucketApprovalTask(ctx context.Context, ta } // begin to sign the new approval task + startQueryChain := time.Now() currentHeight, err = a.baseApp.Consensus().CurrentHeight(ctx) + metrics.GnfdChainHistogram.WithLabelValues("query_current_height_in_create_bucket_approval"). + Observe(time.Since(startQueryChain).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get current height", "error", err) return false, ErrConsensus } task.SetExpiredHeight(currentHeight + a.bucketApprovalTimeoutHeight) + startSignApproval := time.Now() signature, err = a.baseApp.GfSpClient().SignCreateBucketApproval(ctx, task.GetCreateBucketInfo()) + metrics.GnfdChainHistogram.WithLabelValues("sign_in_create_bucket_approval"). + Observe(time.Since(startSignApproval).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to sign the create bucket approval", "error", err) return false, ErrSigner } task.GetCreateBucketInfo().GetPrimarySpApproval().Sig = signature + startPushQueue := time.Now() if err = a.bucketQueue.Push(task); err != nil { + metrics.GnfdChainHistogram.WithLabelValues("update_queue_in_create_bucket_approval"). + Observe(time.Since(startPushQueue).Seconds()) log.CtxErrorw(ctx, "failed to push the create bucket approval to queue", "error", err) return false, err } + metrics.GnfdChainHistogram.WithLabelValues("update_queue_in_create_bucket_approval"). + Observe(time.Since(startPushQueue).Seconds()) return true, nil } @@ -90,7 +114,7 @@ func (a *ApprovalModular) HandleCreateObjectApprovalTask(ctx context.Context, ta currentHeight uint64 ) if task == nil || task.GetCreateObjectInfo() == nil { - log.CtxErrorw(ctx, "failed to pre create object approval, pointer nil") + log.CtxErrorw(ctx, "failed to pre create object approval due to pointer nil") return false, ErrDanglingPointer } defer func() { @@ -99,31 +123,48 @@ func (a *ApprovalModular) HandleCreateObjectApprovalTask(ctx context.Context, ta } log.CtxDebugw(ctx, task.Info()) }() + + startQueryQueue := time.Now() if a.objectQueue.Has(task.Key()) { shadowTask := a.objectQueue.PopByKey(task.Key()) task.SetCreateObjectInfo(shadowTask.(coretask.ApprovalCreateObjectTask).GetCreateObjectInfo()) _ = a.objectQueue.Push(shadowTask) + metrics.GnfdChainHistogram.WithLabelValues("check_repeated_in_create_object_approval"). 
+ Observe(time.Since(startQueryQueue).Seconds()) log.CtxErrorw(ctx, "repeated create object approval task is returned") return true, nil } + metrics.GnfdChainHistogram.WithLabelValues("check_repeated_in_create_object_approval"). + Observe(time.Since(startQueryQueue).Seconds()) // begin to sign the new approval task + startQueryChain := time.Now() currentHeight, err = a.baseApp.Consensus().CurrentHeight(ctx) + metrics.GnfdChainHistogram.WithLabelValues("query_current_height_in_create_object_approval"). + Observe(time.Since(startQueryChain).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get current height", "error", err) return false, ErrConsensus } task.SetExpiredHeight(currentHeight + a.objectApprovalTimeoutHeight) + startSignApproval := time.Now() signature, err = a.baseApp.GfSpClient().SignCreateObjectApproval(ctx, task.GetCreateObjectInfo()) + metrics.GnfdChainHistogram.WithLabelValues("sign_in_create_object_approval"). + Observe(time.Since(startSignApproval).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to sign the create object approval", "error", err) return false, err } task.GetCreateObjectInfo().GetPrimarySpApproval().Sig = signature + startPushQueue := time.Now() if err = a.objectQueue.Push(task); err != nil { + metrics.GnfdChainHistogram.WithLabelValues("update_queue_in_create_object_approval"). + Observe(time.Since(startPushQueue).Seconds()) log.CtxErrorw(ctx, "failed to push the create object task to queue", "error", err) return false, err } + metrics.GnfdChainHistogram.WithLabelValues("update_queue_in_create_object_approval"). + Observe(time.Since(startPushQueue).Seconds()) return true, nil } diff --git a/modular/executor/execute_task.go b/modular/executor/execute_task.go index 26d84d7c0..5249b5fdd 100644 --- a/modular/executor/execute_task.go +++ b/modular/executor/execute_task.go @@ -253,7 +253,7 @@ func (e *ExecuteModular) HandleGCObjectTask(ctx context.Context, task coretask.G task.SetLastDeletedObjectId(currentGCObjectID) metrics.GCObjectCounter.WithLabelValues(e.Name()).Inc() if taskIsCanceled = reportProgress(); taskIsCanceled { - log.CtxErrorw(ctx, "gc object task has been canceled", "task_info", task.Info()) + log.CtxErrorw(ctx, "gc object task has been canceled", "current_gc_object_info", objectInfo, "task_info", task.Info()) return } log.CtxDebugw(ctx, "succeed to gc an object", "object_info", objectInfo, "deleted_at_block_id", currentGCBlockID) diff --git a/modular/gater/admin_handler.go b/modular/gater/admin_handler.go index 890fdd726..3957eaedc 100644 --- a/modular/gater/admin_handler.go +++ b/modular/gater/admin_handler.go @@ -6,7 +6,9 @@ import ( "io" "net/http" "strings" + "time" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" sdktypes "github.com/cosmos/cosmos-sdk/types" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" @@ -75,9 +77,11 @@ func (g *GateModular) getApprovalHandler(w http.ResponseWriter, r *http.Request) return } if reqCtx.NeedVerifyAuthorizer() { + startVerifyAuthorize := time.Now() authorized, err = g.baseApp.GfSpClient().VerifyAuthorize( reqCtx.Context(), coremodule.AuthOpAskCreateBucketApproval, reqCtx.Account(), createBucketApproval.GetBucketName(), "") + metrics.PerfGetApprovalTimeHistogram.WithLabelValues("verify_authorize").Observe(time.Since(startVerifyAuthorize).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) return @@ -91,7 +95,9 @@ func (g *GateModular) getApprovalHandler(w http.ResponseWriter, r *http.Request) task := 
&gfsptask.GfSpCreateBucketApprovalTask{} task.InitApprovalCreateBucketTask(&createBucketApproval, g.baseApp.TaskPriority(task)) var approvalTask coretask.ApprovalCreateBucketTask + startAskCreateBucketApproval := time.Now() approved, approvalTask, err = g.baseApp.GfSpClient().AskCreateBucketApproval(reqCtx.Context(), task) + metrics.PerfGetApprovalTimeHistogram.WithLabelValues("ask_create_bucket_approval").Observe(time.Since(startAskCreateBucketApproval).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to ask create bucket approval", "error", err) return @@ -117,10 +123,12 @@ func (g *GateModular) getApprovalHandler(w http.ResponseWriter, r *http.Request) return } if reqCtx.NeedVerifyAuthorizer() { + startVerifyAuthorize := time.Now() authorized, err = g.baseApp.GfSpClient().VerifyAuthorize( reqCtx.Context(), coremodule.AuthOpAskCreateObjectApproval, reqCtx.Account(), createObjectApproval.GetBucketName(), createObjectApproval.GetObjectName()) + metrics.PerfGetApprovalTimeHistogram.WithLabelValues("verify_authorize").Observe(time.Since(startVerifyAuthorize).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) return @@ -134,7 +142,9 @@ func (g *GateModular) getApprovalHandler(w http.ResponseWriter, r *http.Request) task := &gfsptask.GfSpCreateObjectApprovalTask{} task.InitApprovalCreateObjectTask(&createObjectApproval, g.baseApp.TaskPriority(task)) var approvedTask coretask.ApprovalCreateObjectTask + startAskCreateObjectApproval := time.Now() approved, approvedTask, err = g.baseApp.GfSpClient().AskCreateObjectApproval(r.Context(), task) + metrics.PerfGetApprovalTimeHistogram.WithLabelValues("ask_create_object_approval").Observe(time.Since(startAskCreateObjectApproval).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to ask object approval", "error", err) return diff --git a/pkg/metrics/metric_items.go b/pkg/metrics/metric_items.go index eaf3e11a9..917837692 100644 --- a/pkg/metrics/metric_items.go +++ b/pkg/metrics/metric_items.go @@ -7,8 +7,6 @@ import ( metricshttp "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics/http" ) -// const serviceLabelName = "service" - var MetricsItems = []prometheus.Collector{ // Grpc metrics category DefaultGRPCServerMetrics, @@ -17,6 +15,7 @@ var MetricsItems = []prometheus.Collector{ DefaultHTTPServerMetrics, // Perf workflow category PerfUploadTimeHistogram, + PerfGetApprovalTimeHistogram, // TaskQueue metrics category QueueSizeGauge, QueueCapGauge, @@ -82,6 +81,8 @@ var MetricsItems = []prometheus.Collector{ SPDBTimeHistogram, // BlockSyncer metrics category BlockHeightLagGauge, + // the greenfield chain metrics. + GnfdChainHistogram, } var ( @@ -93,12 +94,18 @@ var ( // DefaultHTTPServerMetrics defines default HTTP server metrics DefaultHTTPServerMetrics = metricshttp.NewServerMetrics() - // perf upload workflow + // PerfUploadTimeHistogram is used to perf upload workflow. 
PerfUploadTimeHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Name: "perf_upload_time", Help: "Track upload workflow costs.", Buckets: prometheus.DefBuckets, }, []string{"perf_upload_time"}) + // PerfGetApprovalTimeHistogram is used to perf get approval workflow + PerfGetApprovalTimeHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "perf_get_approval_time", + Help: "Track get approval workflow costs.", + Buckets: prometheus.DefBuckets, + }, []string{"perf_get_approval_time"}) // task queue metrics QueueSizeGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ @@ -358,11 +365,18 @@ var ( Name: "sp_db_handling_seconds", Help: "Track the latency for spdb requests", Buckets: prometheus.DefBuckets, - }, []string{"method_name"}) + }, []string{"sp_db_handling_seconds"}) // BlockHeightLagGauge records the current block height of block syncer service BlockHeightLagGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "block_syncer_height", Help: "Current block number of block syncer progress.", - }, []string{"service"}) + }, []string{"block_syncer_height"}) + + // GnfdChainHistogram is used to record greenfield chain cost. + GnfdChainHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "gnfd_chain_time", + Help: "Track the greenfield chain api costs.", + Buckets: prometheus.DefBuckets, + }, []string{"gnfd_chain_time"}) ) From 13bcaa31b53471b0cea1ff3af0b522992ca09105 Mon Sep 17 00:00:00 2001 From: Barry <122767193+BarryTong65@users.noreply.github.com> Date: Fri, 9 Jun 2023 14:31:24 +0800 Subject: [PATCH 02/78] feat: list objects & buckets (#569) * feat: list objects and buckets * feat: add router test * fix: update router name and filter * fix: rename variable and set max size * refactor: response from slice to map --- base/gfspclient/metadata.go | 38 ++++++ modular/gater/const.go | 4 + modular/gater/metadata_handler.go | 139 +++++++++++++++++++- modular/gater/router.go | 14 +- modular/gater/router_test.go | 15 +++ modular/metadata/metadata_bucket_service.go | 59 +++++++++ modular/metadata/metadata_object_service.go | 57 ++++++++ proto/modular/metadata/types/metadata.proto | 30 +++++ store/bsdb/bucket.go | 20 +++ store/bsdb/common.go | 10 ++ store/bsdb/database.go | 4 + store/bsdb/object.go | 21 +++ 12 files changed, 405 insertions(+), 6 deletions(-) diff --git a/base/gfspclient/metadata.go b/base/gfspclient/metadata.go index 6680d53a1..9bbe7ee19 100644 --- a/base/gfspclient/metadata.go +++ b/base/gfspclient/metadata.go @@ -404,3 +404,41 @@ func (s *GfSpClient) GetGroupList( } return resp.Groups, resp.Count, nil } + +func (s *GfSpClient) ListBucketsByBucketID(ctx context.Context, bucketIDs []uint64, includeRemoved bool, opts ...grpc.DialOption) (map[uint64]*types.Bucket, error) { + conn, connErr := s.Connection(ctx, s.metadataEndpoint, opts...) 
+ if connErr != nil { + log.CtxErrorw(ctx, "client failed to connect metadata", "error", connErr) + return nil, ErrRpcUnknown + } + defer conn.Close() + req := &types.GfSpListBucketsByBucketIDRequest{ + BucketIds: bucketIDs, + IncludeRemoved: includeRemoved, + } + resp, err := types.NewGfSpMetadataServiceClient(conn).GfSpListBucketsByBucketID(ctx, req) + if err != nil { + log.CtxErrorw(ctx, "client failed to list buckets by bucket ids", "error", err) + return nil, ErrRpcUnknown + } + return resp.Buckets, nil +} + +func (s *GfSpClient) ListObjectsByObjectID(ctx context.Context, objectIDs []uint64, includeRemoved bool, opts ...grpc.DialOption) (map[uint64]*types.Object, error) { + conn, connErr := s.Connection(ctx, s.metadataEndpoint, opts...) + if connErr != nil { + log.CtxErrorw(ctx, "client failed to connect metadata", "error", connErr) + return nil, ErrRpcUnknown + } + defer conn.Close() + req := &types.GfSpListObjectsByObjectIDRequest{ + ObjectIds: objectIDs, + IncludeRemoved: includeRemoved, + } + resp, err := types.NewGfSpMetadataServiceClient(conn).GfSpListObjectsByObjectID(ctx, req) + if err != nil { + log.CtxErrorw(ctx, "client failed to list objects by object ids", "error", err) + return nil, ErrRpcUnknown + } + return resp.Objects, nil +} diff --git a/modular/gater/const.go b/modular/gater/const.go index ce182f903..418a0f39a 100644 --- a/modular/gater/const.go +++ b/modular/gater/const.go @@ -74,6 +74,10 @@ const ( GetGroupListOffsetQuery = "offset" // GetGroupListGroupQuery defines get group list group query, which is used to route request GetGroupListGroupQuery = "group-query" + // ListBucketsByBucketID defines list buckets by bucket ids, which is used to route request + ListBucketsByBucketID = "buckets-query" + // ListObjectsByObjectID defines list objects by object ids, which is used to route request + ListObjectsByObjectID = "objects-query" // GetGroupListNameQuery defines get group list name query, which is used to route request GetGroupListNameQuery = "name" // GetGroupListPrefixQuery defines get group list prefix query, which is used to route request diff --git a/modular/gater/metadata_handler.go b/modular/gater/metadata_handler.go index eb240ba2b..3c7640a75 100644 --- a/modular/gater/metadata_handler.go +++ b/modular/gater/metadata_handler.go @@ -3,6 +3,7 @@ package gater import ( "bytes" "encoding/base64" + "encoding/json" "net/http" "net/url" "strconv" @@ -17,14 +18,16 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" "github.com/bnb-chain/greenfield-storage-provider/modular/metadata/types" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" + "github.com/bnb-chain/greenfield-storage-provider/store/bsdb" "github.com/bnb-chain/greenfield-storage-provider/util" ) const ( - MaximumGetGroupListLimit = 1000 - MaximumGetGroupListOffset = 100000 - DefaultGetGroupListLimit = 50 - DefaultGetGroupListOffset = 0 + MaximumGetGroupListLimit = 1000 + MaximumGetGroupListOffset = 100000 + MaximumListObjectsAndBucketsSize = 1000 + DefaultGetGroupListLimit = 50 + DefaultGetGroupListOffset = 0 ) // getUserBucketsHandler handle get object request @@ -504,3 +507,131 @@ func (g *GateModular) getGroupListHandler(w http.ResponseWriter, r *http.Request w.Header().Set(ContentTypeHeader, ContentTypeJSONHeaderValue) w.Write(b.Bytes()) } + +// listObjectsByObjectIDHandler list objects by object ids +func (g *GateModular) listObjectsByObjectIDHandler(w http.ResponseWriter, r *http.Request) { + var ( + err error + buf bytes.Buffer + objects 
map[uint64]*types.Object
+ objectIDMap map[uint64]bool
+ ok bool
+ objectIDs bsdb.ObjectIDs
+ reqCtx *RequestContext
+ )
+
+ defer func() {
+ reqCtx.Cancel()
+ if err != nil {
+ reqCtx.SetError(gfsperrors.MakeGfSpError(err))
+ log.CtxErrorw(reqCtx.Context(), "failed to list objects by ids", reqCtx.String())
+ MakeErrorResponse(w, err)
+ }
+ }()
+
+ reqCtx, _ = NewRequestContext(r, g)
+
+ err = json.NewDecoder(r.Body).Decode(&objectIDs)
+ if err != nil {
+ log.Errorw("failed to parse object ids", "error", err)
+ err = ErrInvalidQuery
+ return
+ }
+
+ if len(objectIDs.IDs) == 0 || len(objectIDs.IDs) > MaximumListObjectsAndBucketsSize {
+ log.Errorw("failed to check ids", "count", len(objectIDs.IDs))
+ err = ErrInvalidQuery
+ return
+ }
+
+ objectIDMap = make(map[uint64]bool)
+ for _, id := range objectIDs.IDs {
+ if _, ok = objectIDMap[id]; ok {
+ // repeated id keys in request
+ log.Errorw("failed to check ids", "repeated_id", id)
+ err = ErrInvalidQuery
+ return
+ }
+ objectIDMap[id] = true
+ }
+
+ objects, err = g.baseApp.GfSpClient().ListObjectsByObjectID(reqCtx.Context(), objectIDs.IDs, false)
+ if err != nil {
+ log.Errorw("failed to list objects by ids", "error", err)
+ return
+ }
+ grpcResponse := &types.GfSpListObjectsByObjectIDResponse{Objects: objects}
+
+ m := jsonpb.Marshaler{EmitDefaults: true, OrigName: true, EnumsAsInts: true}
+ if err = m.Marshal(&buf, grpcResponse); err != nil {
+ log.Errorw("failed to list objects by ids", "error", err)
+ return
+ }
+
+ w.Header().Set(ContentTypeHeader, ContentTypeJSONHeaderValue)
+ w.Write(buf.Bytes())
+}
+
+// listBucketsByBucketIDHandler list buckets by bucket ids
+func (g *GateModular) listBucketsByBucketIDHandler(w http.ResponseWriter, r *http.Request) {
+ var (
+ err error
+ buf bytes.Buffer
+ buckets map[uint64]*types.Bucket
+ bucketIDMap map[uint64]bool
+ ok bool
+ bucketIDs bsdb.BucketIDs
+ reqCtx *RequestContext
+ )
+
+ defer func() {
+ reqCtx.Cancel()
+ if err != nil {
+ reqCtx.SetError(gfsperrors.MakeGfSpError(err))
+ log.CtxErrorw(reqCtx.Context(), "failed to list buckets by ids", reqCtx.String())
+ MakeErrorResponse(w, err)
+ }
+ }()
+
+ reqCtx, _ = NewRequestContext(r, g)
+
+ err = json.NewDecoder(r.Body).Decode(&bucketIDs)
+ if err != nil {
+ log.Errorw("failed to parse bucket ids", "error", err)
+ err = ErrInvalidQuery
+ return
+ }
+
+ if len(bucketIDs.IDs) == 0 || len(bucketIDs.IDs) > MaximumListObjectsAndBucketsSize {
+ log.Errorw("failed to check ids", "count", len(bucketIDs.IDs))
+ err = ErrInvalidQuery
+ return
+ }
+
+ bucketIDMap = make(map[uint64]bool)
+ for _, id := range bucketIDs.IDs {
+ if _, ok = bucketIDMap[id]; ok {
+ // repeated id keys in request
+ log.Errorw("failed to check ids", "repeated_id", id)
+ err = ErrInvalidQuery
+ return
+ }
+ bucketIDMap[id] = true
+ }
+
+ buckets, err = g.baseApp.GfSpClient().ListBucketsByBucketID(reqCtx.Context(), bucketIDs.IDs, false)
+ if err != nil {
+ log.Errorw("failed to list buckets by ids", "error", err)
+ return
+ }
+ grpcResponse := &types.GfSpListBucketsByBucketIDResponse{Buckets: buckets}
+
+ m := jsonpb.Marshaler{EmitDefaults: true, OrigName: true, EnumsAsInts: true}
+ if err = m.Marshal(&buf, grpcResponse); err != nil {
+ log.Errorw("failed to list buckets by ids", "error", err)
+ return
+ }
+
+ w.Header().Set(ContentTypeHeader, ContentTypeJSONHeaderValue)
+ w.Write(buf.Bytes())
+}
diff --git a/modular/gater/router.go b/modular/gater/router.go
index ebbe92aee..405a72cc6 100644
--- a/modular/gater/router.go
+++ b/modular/gater/router.go
@@ -29,6 +29,8 @@ const (
 getObjectMetaRouterName = "GetObjectMeta"
getBucketMetaRouterName = "GetBucketMeta" getGroupListRouterName = "GetGroupList" + listBucketsByBucketIDRouterName = "ListBucketsByBucketID" + listObjectsByObjectIDRouterName = "ListObjectsByObjectID" ) const ( @@ -102,8 +104,16 @@ func (g *GateModular) RegisterHandler(router *mux.Router) { Methods(http.MethodGet). Queries(GetGroupListGroupQuery, ""). HandlerFunc(g.getGroupListHandler) - - // bucket list router, path style + router.Path("/"). + Name(listObjectsByObjectIDRouterName). + Methods(http.MethodPost). + Queries(ListObjectsByObjectID, ""). + HandlerFunc(g.listObjectsByObjectIDHandler) + router.Path("/"). + Name(listBucketsByBucketIDRouterName). + Methods(http.MethodPost). + Queries(ListBucketsByBucketID, ""). + HandlerFunc(g.listBucketsByBucketIDHandler) router.Path("/"). Name(getUserBucketsRouterName). Methods(http.MethodGet). diff --git a/modular/gater/router_test.go b/modular/gater/router_test.go index 2051565a6..bafabb435 100644 --- a/modular/gater/router_test.go +++ b/modular/gater/router_test.go @@ -217,6 +217,21 @@ func TestRouters(t *testing.T) { shouldMatch: true, wantedRouterName: getGroupListRouterName, }, + { + name: "List objects by object ids router", + router: gwRouter, + method: http.MethodPost, + url: scheme + testDomain + "/?" + ListObjectsByObjectID, + shouldMatch: true, + wantedRouterName: listObjectsByObjectIDRouterName, + }, { + name: "List buckets by bucket ids router", + router: gwRouter, + method: http.MethodPost, + url: scheme + testDomain + "/?" + ListBucketsByBucketID, + shouldMatch: true, + wantedRouterName: listBucketsByBucketIDRouterName, + }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { diff --git a/modular/metadata/metadata_bucket_service.go b/modular/metadata/metadata_bucket_service.go index 52b8fb090..60855d34f 100644 --- a/modular/metadata/metadata_bucket_service.go +++ b/modular/metadata/metadata_bucket_service.go @@ -391,3 +391,62 @@ func (r *MetadataModular) GfSpListBucketReadRecord( } return resp, nil } + +// GfSpListBucketsByBucketID list buckets by bucket ids +func (r *MetadataModular) GfSpListBucketsByBucketID(ctx context.Context, req *types.GfSpListBucketsByBucketIDRequest) (resp *types.GfSpListBucketsByBucketIDResponse, err error) { + var ( + buckets []*model.Bucket + ids []common.Hash + bucketsMap map[uint64]*types.Bucket + ) + + ids = make([]common.Hash, len(req.BucketIds)) + for i, id := range req.BucketIds { + ids[i] = common.BigToHash(math.NewUint(id).BigInt()) + } + + ctx = log.Context(ctx, req) + buckets, err = r.baseApp.GfBsDB().ListBucketsByBucketID(ids, req.IncludeRemoved) + if err != nil { + log.CtxErrorw(ctx, "failed to list buckets by bucket ids", "error", err) + return nil, err + } + + bucketsMap = make(map[uint64]*types.Bucket) + for _, id := range req.BucketIds { + bucketsMap[id] = nil + } + + for _, bucket := range buckets { + bucketsMap[bucket.BucketID.Big().Uint64()] = &types.Bucket{ + BucketInfo: &storage_types.BucketInfo{ + Owner: bucket.Owner.String(), + BucketName: bucket.BucketName, + Id: math.NewUintFromBigInt(bucket.BucketID.Big()), + SourceType: storage_types.SourceType(storage_types.SourceType_value[bucket.SourceType]), + CreateAt: bucket.CreateTime, + PaymentAddress: bucket.PaymentAddress.String(), + PrimarySpAddress: bucket.PrimarySpAddress.String(), + ChargedReadQuota: bucket.ChargedReadQuota, + Visibility: storage_types.VisibilityType(storage_types.VisibilityType_value[bucket.Visibility]), + BillingInfo: storage_types.BillingInfo{ + PriceTime: 0, + TotalChargeSize: 0, + 
SecondarySpObjectsSize: nil, + }, + BucketStatus: storage_types.BucketStatus(storage_types.BucketStatus_value[bucket.Status]), + }, + Removed: bucket.Removed, + DeleteAt: bucket.DeleteAt, + DeleteReason: bucket.DeleteReason, + Operator: bucket.Operator.String(), + CreateTxHash: bucket.CreateTxHash.String(), + UpdateTxHash: bucket.UpdateTxHash.String(), + UpdateAt: bucket.UpdateAt, + UpdateTime: bucket.UpdateTime, + } + } + resp = &types.GfSpListBucketsByBucketIDResponse{Buckets: bucketsMap} + log.CtxInfow(ctx, "succeed to list buckets by bucket ids") + return resp, nil +} diff --git a/modular/metadata/metadata_object_service.go b/modular/metadata/metadata_object_service.go index eae264c20..1ed5836f0 100644 --- a/modular/metadata/metadata_object_service.go +++ b/modular/metadata/metadata_object_service.go @@ -7,6 +7,7 @@ import ( "cosmossdk.io/math" "github.com/bnb-chain/greenfield/types/s3util" storage_types "github.com/bnb-chain/greenfield/x/storage/types" + "github.com/forbole/juno/v4/common" "github.com/bnb-chain/greenfield-storage-provider/modular/metadata/types" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" @@ -214,3 +215,59 @@ func (r *MetadataModular) GfSpGetObjectMeta(ctx context.Context, req *types.GfSp log.CtxInfo(ctx, "succeed to get object meta") return resp, nil } + +// GfSpListObjectsByObjectID list objects by object ids +func (r *MetadataModular) GfSpListObjectsByObjectID(ctx context.Context, req *types.GfSpListObjectsByObjectIDRequest) (resp *types.GfSpListObjectsByObjectIDResponse, err error) { + var ( + objects []*model.Object + ids []common.Hash + objectsMap map[uint64]*types.Object + ) + + ids = make([]common.Hash, len(req.ObjectIds)) + for i, id := range req.ObjectIds { + ids[i] = common.BigToHash(math.NewUint(id).BigInt()) + } + + objects, err = r.baseApp.GfBsDB().ListObjectsByObjectID(ids, req.IncludeRemoved) + if err != nil { + log.CtxErrorw(ctx, "failed to list objects by object ids", "error", err) + return nil, err + } + + objectsMap = make(map[uint64]*types.Object) + for _, id := range req.ObjectIds { + objectsMap[id] = nil + } + + for _, object := range objects { + objectsMap[object.ObjectID.Big().Uint64()] = &types.Object{ + ObjectInfo: &storage_types.ObjectInfo{ + Owner: object.Owner.String(), + BucketName: object.BucketName, + ObjectName: object.ObjectName, + Id: math.NewUintFromBigInt(object.ObjectID.Big()), + PayloadSize: object.PayloadSize, + ContentType: object.ContentType, + CreateAt: object.CreateTime, + ObjectStatus: storage_types.ObjectStatus(storage_types.ObjectStatus_value[object.ObjectStatus]), + RedundancyType: storage_types.RedundancyType(storage_types.RedundancyType_value[object.RedundancyType]), + SourceType: storage_types.SourceType(storage_types.SourceType_value[object.SourceType]), + Checksums: object.Checksums, + SecondarySpAddresses: object.SecondarySpAddresses, + Visibility: storage_types.VisibilityType(storage_types.VisibilityType_value[object.Visibility]), + }, + LockedBalance: object.LockedBalance.String(), + Removed: object.Removed, + DeleteAt: object.DeleteAt, + DeleteReason: object.DeleteReason, + Operator: object.Operator.String(), + CreateTxHash: object.CreateTxHash.String(), + UpdateTxHash: object.UpdateTxHash.String(), + SealTxHash: object.SealTxHash.String(), + } + } + resp = &types.GfSpListObjectsByObjectIDResponse{Objects: objectsMap} + log.CtxInfo(ctx, "succeed to list objects by object ids") + return resp, nil +} diff --git a/proto/modular/metadata/types/metadata.proto 
b/proto/modular/metadata/types/metadata.proto
index c93604054..17c801163 100644
--- a/proto/modular/metadata/types/metadata.proto
+++ b/proto/modular/metadata/types/metadata.proto
@@ -373,6 +373,34 @@ message GfSpGetGroupListResponse {
 int64 count = 2;
 }
 
+// GfSpListBucketsByBucketIDRequest is request type for the GfSpListBucketsByBucketID RPC method
+message GfSpListBucketsByBucketIDRequest {
+ // bucket_ids is the list of unique identifiers for buckets
+ repeated uint64 bucket_ids = 1;
+ // include_removed indicates whether this request can get the removed buckets information
+ bool include_removed = 2;
+}
+
+// GfSpListBucketsByBucketIDResponse is response type for the GfSpListBucketsByBucketID RPC method.
+message GfSpListBucketsByBucketIDResponse {
+ // buckets defines the information of a bucket map
+ map<uint64, Bucket> buckets = 1;
+}
+
+// GfSpListObjectsByObjectIDRequest is request type for the GfSpListObjectsByObjectID RPC method
+message GfSpListObjectsByObjectIDRequest {
+ // object_ids is the list of unique identifiers for objects
+ repeated uint64 object_ids = 1;
+ // include_removed indicates whether this request can get the removed objects information
+ bool include_removed = 2;
+}
+
+// GfSpListObjectsByObjectIDResponse is response type for the GfSpListObjectsByObjectID RPC method.
+message GfSpListObjectsByObjectIDResponse {
+ // objects defines the information of an object map
+ map<uint64, Object> objects = 1;
+}
+
 service GfSpMetadataService {
 rpc GfSpGetUserBuckets(GfSpGetUserBucketsRequest) returns (GfSpGetUserBucketsResponse) {}
 rpc GfSpListObjectsByBucketName(GfSpListObjectsByBucketNameRequest) returns (GfSpListObjectsByBucketNameResponse) {}
@@ -391,4 +419,6 @@ service GfSpMetadataService {
 rpc GfSpListBucketReadRecord(GfSpListBucketReadRecordRequest) returns (GfSpListBucketReadRecordResponse) {}
 rpc GfSpQueryUploadProgress(GfSpQueryUploadProgressRequest) returns (GfSpQueryUploadProgressResponse) {}
 rpc GfSpGetGroupList(GfSpGetGroupListRequest) returns (GfSpGetGroupListResponse) {}
+ rpc GfSpListBucketsByBucketID(GfSpListBucketsByBucketIDRequest) returns (GfSpListBucketsByBucketIDResponse) {}
+ rpc GfSpListObjectsByObjectID(GfSpListObjectsByObjectIDRequest) returns (GfSpListObjectsByObjectIDResponse) {}
 }
diff --git a/store/bsdb/bucket.go b/store/bsdb/bucket.go
index 3633583e9..6908b6d2f 100644
--- a/store/bsdb/bucket.go
+++ b/store/bsdb/bucket.go
@@ -140,3 +140,23 @@ func (b *BsDBImpl) GetBucketMetaByName(bucketName string, includePrivate bool) (
 return bucketFullMeta, err
 }
+
+// ListBucketsByBucketID list buckets by bucket ids
+func (b *BsDBImpl) ListBucketsByBucketID(ids []common.Hash, includeRemoved bool) ([]*Bucket, error) {
+ var (
+ buckets []*Bucket
+ err error
+ filters []func(*gorm.DB) *gorm.DB
+ )
+
+ if !includeRemoved {
+ filters = append(filters, RemovedFilter(includeRemoved))
+ }
+
+ err = b.db.Table((&Bucket{}).TableName()).
+ Select("*").
+ Where("bucket_id in (?)", ids).
+ Scopes(filters...).
+ Find(&buckets).Error
+ return buckets, err
+}
diff --git a/store/bsdb/common.go b/store/bsdb/common.go
index 150c746b1..c4d19ed52 100644
--- a/store/bsdb/common.go
+++ b/store/bsdb/common.go
@@ -43,3 +43,13 @@ const (
 // BsDBSwitchedDataBase defines env variable name for switched block syncer db database.
BsDBSwitchedDataBase = "BS_DB_SWITCHED_DATABASE" ) + +// ObjectIDs represents the request of list object by ids +type ObjectIDs struct { + IDs []uint64 `json:"ids"` +} + +// BucketIDs represents the request of list bucket by ids +type BucketIDs struct { + IDs []uint64 `json:"ids"` +} diff --git a/store/bsdb/database.go b/store/bsdb/database.go index 0a874e1c0..8a396e593 100644 --- a/store/bsdb/database.go +++ b/store/bsdb/database.go @@ -44,6 +44,10 @@ type Metadata interface { GetBucketMetaByName(bucketName string, includePrivate bool) (*BucketFullMeta, error) // ListGroupsByNameAndSourceType get groups list by specific parameters ListGroupsByNameAndSourceType(name, prefix, sourceType string, limit, offset int, includeRemoved bool) ([]*Group, int64, error) + // ListObjectsByObjectID list objects by object ids + ListObjectsByObjectID(ids []common.Hash, includeRemoved bool) ([]*Object, error) + // ListBucketsByBucketID list buckets by bucket ids + ListBucketsByBucketID(ids []common.Hash, includeRemoved bool) ([]*Bucket, error) } // BSDB contains all the methods required by block syncer database diff --git a/store/bsdb/object.go b/store/bsdb/object.go index e2b27f4ad..8ea811135 100644 --- a/store/bsdb/object.go +++ b/store/bsdb/object.go @@ -1,6 +1,7 @@ package bsdb import ( + "github.com/forbole/juno/v4/common" "gorm.io/gorm" ) @@ -115,3 +116,23 @@ func (b *BsDBImpl) GetObjectByName(objectName string, bucketName string, include Take(&object).Error return object, err } + +// ListObjectsByObjectID list objects by object ids +func (b *BsDBImpl) ListObjectsByObjectID(ids []common.Hash, includeRemoved bool) ([]*Object, error) { + var ( + objects []*Object + err error + filters []func(*gorm.DB) *gorm.DB + ) + + if !includeRemoved { + filters = append(filters, RemovedFilter(includeRemoved)) + } + + err = b.db.Table((&Object{}).TableName()). + Select("*"). + Where("object_id in (?)", ids). + Scopes(filters...). 
+ Find(&objects).Error + return objects, err +} From 04f9d16612fed67da46da0a60bc8596f19a27aa7 Mon Sep 17 00:00:00 2001 From: joeycli Date: Mon, 12 Jun 2023 18:15:04 +0800 Subject: [PATCH 03/78] refactor: task queue (#581) --- base/gfsptqueue/queue.go | 228 +++++++------------------ base/gfsptqueue/queue_limit.go | 242 +++++++-------------------- base/types/gfsptask/task_key.go | 43 +++-- modular/approver/approve_task.go | 13 +- modular/approver/approver.go | 51 ++++-- modular/approver/approver_options.go | 4 +- modular/gater/object_handler.go | 7 + 7 files changed, 199 insertions(+), 389 deletions(-) diff --git a/base/gfsptqueue/queue.go b/base/gfsptqueue/queue.go index ed856f5b8..9222231a2 100644 --- a/base/gfsptqueue/queue.go +++ b/base/gfsptqueue/queue.go @@ -2,7 +2,6 @@ package gfsptqueue import ( "net/http" - "strings" "sync" "time" @@ -11,6 +10,7 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/core/taskqueue" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" + "github.com/bnb-chain/greenfield-storage-provider/util/maps" ) const ( @@ -26,11 +26,10 @@ var _ taskqueue.TQueue = &GfSpTQueue{} var _ taskqueue.TQueueOnStrategy = &GfSpTQueue{} type GfSpTQueue struct { - name string - tasks []coretask.Task - indexer map[coretask.TKey]int - cap int - mux sync.RWMutex + name string + tasks map[coretask.TKey]coretask.Task + cap int + mux sync.RWMutex gcFunc func(task2 coretask.Task) bool filterFunc func(task2 coretask.Task) bool @@ -39,10 +38,9 @@ type GfSpTQueue struct { func NewGfSpTQueue(name string, cap int) taskqueue.TQueueOnStrategy { metrics.QueueCapGauge.WithLabelValues(name).Set(float64(cap)) return &GfSpTQueue{ - name: name, - cap: cap, - tasks: make([]coretask.Task, 0), - indexer: make(map[coretask.TKey]int), + name: name, + cap: cap, + tasks: make(map[coretask.TKey]coretask.Task), } } @@ -60,7 +58,6 @@ func (t *GfSpTQueue) Cap() int { // Has returns an indicator whether the task in queue. func (t *GfSpTQueue) Has(key coretask.TKey) bool { - // maybe gc task, need RWLock, not RLock t.mux.Lock() defer t.mux.Unlock() return t.has(key) @@ -68,74 +65,20 @@ func (t *GfSpTQueue) Has(key coretask.TKey) bool { // Top returns the top task in the queue, if the queue empty, returns nil. func (t *GfSpTQueue) Top() coretask.Task { - var gcTasks []coretask.Task - // maybe gc task, need RWLock, not RLock t.mux.Lock() - defer func() { - defer t.mux.Unlock() - for _, task := range gcTasks { - t.delete(task) - } - }() - - if len(t.tasks) == 0 { - return nil - } - for i := len(t.tasks) - 1; i >= 0; i-- { - if t.gcFunc != nil { - if t.gcFunc(t.tasks[i]) { - gcTasks = append(gcTasks, t.tasks[i]) - continue - } - } - if t.filterFunc != nil { - if t.filterFunc(t.tasks[i]) { - return t.tasks[i] - } - } else { - return t.tasks[i] - } - } - return nil + defer t.mux.Unlock() + return t.top() } // Pop pops and returns the top task in queue, if the queue empty, returns nil. 
func (t *GfSpTQueue) Pop() coretask.Task { - var gcTasks []coretask.Task - var popTask coretask.Task - // maybe trigger retire task, need RWLock not RLock t.mux.Lock() - defer func() { - defer t.mux.Unlock() - for _, task := range gcTasks { - t.delete(task) - } - if popTask != nil { - t.delete(popTask) - } - }() - - if len(t.tasks) == 0 { - return nil - } - for i := len(t.tasks) - 1; i >= 0; i-- { - if t.gcFunc != nil { - if t.gcFunc(t.tasks[i]) { - gcTasks = append(gcTasks, t.tasks[i]) - continue - } - } - if t.filterFunc != nil { - if t.filterFunc(t.tasks[i]) { - popTask = t.tasks[i] - return popTask - } - } else { - popTask = t.tasks[i] - return popTask - } + defer t.mux.Unlock() + task := t.top() + if task != nil { + t.delete(task) } - return nil + return task } // PopByKey pops the task by the task key, if the task does not exist , returns nil. @@ -145,32 +88,10 @@ func (t *GfSpTQueue) PopByKey(key coretask.TKey) coretask.Task { if !t.has(key) { return nil } - idx, ok := t.indexer[key] + task, ok := t.tasks[key] if !ok { - log.Errorw("[BUG] no task in queue after has check", "queue", t.name, - "task_key", key) return nil } - if idx >= len(t.tasks) { - log.Errorw("[BUG] index out of bounds", "queue", t.name, - "len", len(t.tasks), "index", idx) - t.reset() - idx, ok = t.indexer[key] - if !ok { - return nil - } - } - task := t.tasks[idx] - if strings.EqualFold(task.Key().String(), key.String()) { - log.Errorw("[BUG] index mismatch task", "queue", t.name, - "index_key", key.String(), "task_key", task.Key().String()) - t.reset() - idx, ok = t.indexer[key] - if !ok { - return nil - } - task = t.tasks[idx] - } t.delete(task) return task } @@ -183,25 +104,23 @@ func (t *GfSpTQueue) Push(task coretask.Task) error { return ErrTaskRepeated } if t.exceed() { - var gcTasks []coretask.Task + if t.gcFunc == nil { + log.Warnw("queue exceed", "queue", t.name, "cap", t.cap, "len", len(t.tasks)) + return ErrTaskQueueExceed + } clear := false - if t.gcFunc != nil { - for i := len(t.tasks) - 1; i >= 0; i-- { - if t.gcFunc(t.tasks[i]) { - gcTasks = append(gcTasks, t.tasks[i]) - clear = true - // only retire one task - break - } + keys := maps.SortKeys(t.tasks) + for _, key := range keys { + if t.gcFunc(t.tasks[key]) { + t.delete(t.tasks[key]) + clear = true + // only retire one task + break } } if !clear { log.Warnw("queue exceed", "queue", t.name, "cap", t.cap, "len", len(t.tasks)) return ErrTaskQueueExceed - } else { - for _, gcTask := range gcTasks { - t.delete(gcTask) - } } } t.add(task) @@ -213,89 +132,58 @@ func (t *GfSpTQueue) exceed() bool { } func (t *GfSpTQueue) add(task coretask.Task) { - if t.has(task.Key()) { + if task == nil || t.has(task.Key()) { return } - t.tasks = append(t.tasks, task) - t.indexer[task.Key()] = len(t.tasks) - 1 + t.tasks[task.Key()] = task metrics.QueueSizeGauge.WithLabelValues(t.name).Set(float64(len(t.tasks))) } func (t *GfSpTQueue) delete(task coretask.Task) { - if !t.has(task.Key()) { - return - } - idx, ok := t.indexer[task.Key()] - if !ok { - log.Errorw("[BUG] no task in queue after has check", "queue", t.name, - "task_key", task.Key().String()) + if task == nil || !t.has(task.Key()) { return } defer func() { - delete(t.indexer, task.Key()) + metrics.TaskInQueueTimeHistogram.WithLabelValues(t.name).Observe( + time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) metrics.QueueSizeGauge.WithLabelValues(t.name).Set(float64(len(t.tasks))) }() - if idx >= len(t.tasks) { - log.Errorw("[BUG] index out of bounds", "queue", t.name, - "len", len(t.tasks), "index", idx) - 
t.reset() - idx, ok = t.indexer[task.Key()] - if !ok { - return - } - } - t.tasks = append(t.tasks[0:idx], t.tasks[idx+1:]...) - t.reset() - metrics.TaskInQueueTimeHistogram.WithLabelValues(t.name).Observe( - time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) + delete(t.tasks, task.Key()) } func (t *GfSpTQueue) has(key coretask.TKey) bool { - if len(t.tasks) != len(t.indexer) { - log.Errorw("[BUG] index length mismatch task length", "queue", t.name, - "index_length", len(t.indexer), "task_length", len(t.tasks)) - t.reset() - } - idx, ok := t.indexer[key] - if ok { - if idx >= len(t.tasks) { - log.Errorw("[BUG] index out of bounds", "queue", t.name, - "len", len(t.tasks), "index", idx, "key", key) - t.reset() - } - idx, ok = t.indexer[key] - if !ok { + task, ok := t.tasks[key] + if ok && t.gcFunc != nil { + if t.gcFunc(task) { + delete(t.tasks, task.Key()) return false } - task := t.tasks[idx] - if !strings.EqualFold(task.Key().String(), key.String()) { - log.Errorw("[BUG] index mismatch task", "queue", t.name, - "index_key", key.String(), "task_key", task.Key().String()) - t.reset() - idx, ok = t.indexer[key] - if !ok { - return false - } - task = t.tasks[idx] - } + } + return ok +} + +func (t *GfSpTQueue) top() coretask.Task { + tasksCreateMap := make(map[int64]coretask.Task) + for _, task := range t.tasks { + tasksCreateMap[task.GetCreateTime()] = task + } + keys := maps.SortKeys(tasksCreateMap) + for _, key := range keys { + task := tasksCreateMap[key] if t.gcFunc != nil { if t.gcFunc(task) { - delete(t.indexer, task.Key()) - t.tasks = append(t.tasks[0:idx], t.tasks[idx+1:]...) - t.reset() - return false + t.delete(t.tasks[task.Key()]) } } - return true - } - return false -} - -func (t *GfSpTQueue) reset() { - t.indexer = make(map[coretask.TKey]int) - for i, task := range t.tasks { - t.indexer[task.Key()] = i + if t.filterFunc != nil { + if t.filterFunc(task) { + return task + } + } else { + return task + } } + return nil } // SetFilterTaskStrategy sets the callback func to filter task for popping or topping. 
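
The queue.go rewrite above replaces the slice-plus-indexer bookkeeping (and the several [BUG] re-indexing recovery paths it needed) with a single map keyed by task key; pop order is re-derived on demand by sorting tasks on their create time. Below is a minimal self-contained sketch of the same idea, with a toy Task interface standing in for coretask.Task and sort.Slice standing in for the repo's util/maps.SortKeys helper; all names in the sketch are illustrative, not the repo's API.

```go
package main

import (
	"errors"
	"fmt"
	"sort"
	"sync"
)

// Task is a toy stand-in for coretask.Task.
type Task interface {
	Key() string
	CreateTime() int64
}

type simpleTask struct {
	key     string
	created int64
}

func (s simpleTask) Key() string       { return s.key }
func (s simpleTask) CreateTime() int64 { return s.created }

// queue keeps tasks in a single map; ordering is reconstructed when needed.
type queue struct {
	mux   sync.Mutex
	cap   int
	tasks map[string]Task
	gc    func(Task) bool // reports whether a task is stale and can be retired
}

// Push rejects duplicates and, when full, retires at most one stale task
// (mirroring the "only retire one task" rule in the patch).
func (q *queue) Push(t Task) error {
	q.mux.Lock()
	defer q.mux.Unlock()
	if _, ok := q.tasks[t.Key()]; ok {
		return errors.New("task repeated")
	}
	if len(q.tasks) >= q.cap {
		cleared := false
		for k, old := range q.tasks {
			if q.gc != nil && q.gc(old) {
				delete(q.tasks, k)
				cleared = true
				break
			}
		}
		if !cleared {
			return errors.New("task queue exceed")
		}
	}
	q.tasks[t.Key()] = t
	return nil
}

// Pop removes and returns the oldest live task by create time.
func (q *queue) Pop() Task {
	q.mux.Lock()
	defer q.mux.Unlock()
	ordered := make([]Task, 0, len(q.tasks))
	for _, t := range q.tasks {
		ordered = append(ordered, t)
	}
	sort.Slice(ordered, func(i, j int) bool { return ordered[i].CreateTime() < ordered[j].CreateTime() })
	for _, t := range ordered {
		if q.gc != nil && q.gc(t) {
			delete(q.tasks, t.Key()) // retire stale tasks instead of returning them
			continue
		}
		delete(q.tasks, t.Key())
		return t
	}
	return nil
}

func main() {
	q := &queue{cap: 2, tasks: map[string]Task{}}
	_ = q.Push(simpleTask{"upload:a", 2})
	_ = q.Push(simpleTask{"upload:b", 1})
	fmt.Println(q.Pop().Key()) // upload:b, the older task
}
```

One caveat in the patched code: top() keys its intermediate ordering map by GetCreateTime(), so two tasks created in the same second overwrite each other in that map; the sketch sorts a slice instead, which keeps every entry.
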
diff --git a/base/gfsptqueue/queue_limit.go b/base/gfsptqueue/queue_limit.go index 0086e2315..e89f80fff 100644 --- a/base/gfsptqueue/queue_limit.go +++ b/base/gfsptqueue/queue_limit.go @@ -1,7 +1,6 @@ package gfsptqueue import ( - "strings" "sync" "time" @@ -10,17 +9,17 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/core/taskqueue" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" + "github.com/bnb-chain/greenfield-storage-provider/util/maps" ) var _ taskqueue.TQueueWithLimit = &GfSpTQueueWithLimit{} var _ taskqueue.TQueueOnStrategyWithLimit = &GfSpTQueueWithLimit{} type GfSpTQueueWithLimit struct { - name string - tasks []coretask.Task - indexer map[coretask.TKey]int - cap int - mux sync.RWMutex + name string + tasks map[coretask.TKey]coretask.Task + cap int + mux sync.RWMutex gcFunc func(task2 coretask.Task) bool filterFunc func(task2 coretask.Task) bool @@ -29,10 +28,9 @@ type GfSpTQueueWithLimit struct { func NewGfSpTQueueWithLimit(name string, cap int) taskqueue.TQueueOnStrategyWithLimit { metrics.QueueCapGauge.WithLabelValues(name).Set(float64(cap)) return &GfSpTQueueWithLimit{ - name: name, - cap: cap, - tasks: make([]coretask.Task, 0), - indexer: make(map[coretask.TKey]int), + name: name, + cap: cap, + tasks: make(map[coretask.TKey]coretask.Task), } } @@ -57,78 +55,22 @@ func (t *GfSpTQueueWithLimit) Has(key coretask.TKey) bool { } func (t *GfSpTQueueWithLimit) TopByLimit(limit corercmgr.Limit) coretask.Task { - var gcTasks []coretask.Task - // maybe gc task, need RWLock, not RLock + // maybe trigger gc task, need RWLock not RLock t.mux.Lock() - defer func() { - defer t.mux.Unlock() - for _, task := range gcTasks { - t.delete(task) - } - }() - - if len(t.tasks) == 0 { - return nil - } - for i := len(t.tasks) - 1; i >= 0; i-- { - if t.gcFunc != nil { - if t.gcFunc(t.tasks[i]) { - gcTasks = append(gcTasks, t.tasks[i]) - continue - } - } - if limit.NotLess(t.tasks[i].EstimateLimit()) { - if t.filterFunc != nil { - if t.filterFunc(t.tasks[i]) { - return t.tasks[i] - } - } else { - return t.tasks[i] - } - } - } - return nil + defer t.mux.Unlock() + return t.topByLimit(limit) } // PopByLimit pops and returns the top task that the LimitEstimate less than the param in the queue. func (t *GfSpTQueueWithLimit) PopByLimit(limit corercmgr.Limit) coretask.Task { - var gcTasks []coretask.Task - var popTask coretask.Task // maybe trigger gc task, need RWLock not RLock t.mux.Lock() - defer func() { - defer t.mux.Unlock() - for _, task := range gcTasks { - t.delete(task) - } - if popTask != nil { - t.delete(popTask) - } - }() - - if len(t.tasks) == 0 { - return nil - } - for i := len(t.tasks) - 1; i >= 0; i-- { - if t.gcFunc != nil { - if t.gcFunc(t.tasks[i]) { - gcTasks = append(gcTasks, t.tasks[i]) - continue - } - } - if limit.NotLess(t.tasks[i].EstimateLimit()) { - if t.filterFunc != nil { - if t.filterFunc(t.tasks[i]) { - popTask = t.tasks[i] - return popTask - } - } else { - popTask = t.tasks[i] - return popTask - } - } + defer t.mux.Unlock() + task := t.topByLimit(limit) + if task != nil { + t.delete(task) } - return nil + return task } // PopByKey pops the task by the task key, if the task does not exist , returns nil. 
@@ -138,32 +80,10 @@ func (t *GfSpTQueueWithLimit) PopByKey(key coretask.TKey) coretask.Task { if !t.has(key) { return nil } - idx, ok := t.indexer[key] + task, ok := t.tasks[key] if !ok { - log.Errorw("[BUG] no task in queue after has check", "queue", t.name, - "task_key", key) return nil } - if idx >= len(t.tasks) { - log.Errorw("[BUG] index out of bounds", "queue", t.name, - "len", len(t.tasks), "index", idx) - t.reset() - idx, ok = t.indexer[key] - if !ok { - return nil - } - } - task := t.tasks[idx] - if strings.EqualFold(task.Key().String(), key.String()) { - log.Errorw("[BUG] index mismatch task", "queue", t.name, - "index_key", key.String(), "task_key", task.Key().String()) - t.reset() - idx, ok = t.indexer[key] - if !ok { - return nil - } - task = t.tasks[idx] - } t.delete(task) return task } @@ -172,34 +92,27 @@ func (t *GfSpTQueueWithLimit) PopByKey(key coretask.TKey) coretask.Task { func (t *GfSpTQueueWithLimit) Push(task coretask.Task) error { t.mux.Lock() defer t.mux.Unlock() - if idx, ok := t.indexer[task.Key()]; ok { - if idx >= len(t.tasks) { - delete(t.indexer, task.Key()) - } else { - log.Warnw("push repeat task", "queue", t.name, "task", task.Key()) - return ErrTaskRepeated - } + if t.has(task.Key()) { + return ErrTaskRepeated } if t.exceed() { - var gcTasks []coretask.Task + if t.gcFunc == nil { + log.Warnw("queue exceed", "queue", t.name, "cap", t.cap, "len", len(t.tasks)) + return ErrTaskQueueExceed + } clear := false - if t.gcFunc != nil { - for i := len(t.tasks) - 1; i >= 0; i-- { - if t.gcFunc(t.tasks[i]) { - gcTasks = append(gcTasks, t.tasks[i]) - clear = true - // only retire one task - break - } + keys := maps.SortKeys(t.tasks) + for _, key := range keys { + if t.gcFunc(t.tasks[key]) { + t.delete(t.tasks[key]) + clear = true + // only retire one task + break } } if !clear { log.Warnw("queue exceed", "queue", t.name, "cap", t.cap, "len", len(t.tasks)) return ErrTaskQueueExceed - } else { - for _, gcTask := range gcTasks { - t.delete(gcTask) - } } } t.add(task) @@ -211,89 +124,60 @@ func (t *GfSpTQueueWithLimit) exceed() bool { } func (t *GfSpTQueueWithLimit) add(task coretask.Task) { - if t.has(task.Key()) { + if task == nil || t.has(task.Key()) { return } - t.tasks = append(t.tasks, task) - t.indexer[task.Key()] = len(t.tasks) - 1 + t.tasks[task.Key()] = task metrics.QueueSizeGauge.WithLabelValues(t.name).Set(float64(len(t.tasks))) } func (t *GfSpTQueueWithLimit) delete(task coretask.Task) { - if !t.has(task.Key()) { - return - } - idx, ok := t.indexer[task.Key()] - if !ok { - log.Errorw("[BUG] no task in queue after has check", "queue", t.name, - "task_key", task.Key().String()) + if task == nil || !t.has(task.Key()) { return } defer func() { - delete(t.indexer, task.Key()) + metrics.TaskInQueueTimeHistogram.WithLabelValues(t.name).Observe( + time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) metrics.QueueSizeGauge.WithLabelValues(t.name).Set(float64(len(t.tasks))) }() - if idx >= len(t.tasks) { - log.Errorw("[BUG] index out of bounds", "queue", t.name, - "len", len(t.tasks), "index", idx) - t.reset() - idx, ok = t.indexer[task.Key()] - if !ok { - return - } - } - t.tasks = append(t.tasks[0:idx], t.tasks[idx+1:]...) 
- t.reset() - metrics.TaskInQueueTimeHistogram.WithLabelValues(t.name).Observe( - time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) + delete(t.tasks, task.Key()) } func (t *GfSpTQueueWithLimit) has(key coretask.TKey) bool { - if len(t.tasks) != len(t.indexer) { - log.Errorw("[BUG] index length mismatch task length", "queue", t.name, - "index_length", len(t.indexer), "task_length", len(t.tasks)) - t.reset() - } - idx, ok := t.indexer[key] - if ok { - if idx >= len(t.tasks) { - log.Errorw("[BUG] index out of bounds", "queue", t.name, - "len", len(t.tasks), "index", idx, "key", key) - t.reset() - } - idx, ok = t.indexer[key] - if !ok { + task, ok := t.tasks[key] + if ok && t.gcFunc != nil { + if t.gcFunc(task) { + delete(t.tasks, task.Key()) return false } - task := t.tasks[idx] - if !strings.EqualFold(task.Key().String(), key.String()) { - log.Errorw("[BUG] index mismatch task", "queue", t.name, - "index_key", key.String(), "task_key", task.Key().String()) - t.reset() - idx, ok = t.indexer[key] - if !ok { - return false - } - task = t.tasks[idx] - } + } + return ok +} + +func (t *GfSpTQueueWithLimit) topByLimit(limit corercmgr.Limit) coretask.Task { + tasksCreateMap := make(map[int64]coretask.Task) + for _, task := range t.tasks { + tasksCreateMap[task.GetCreateTime()] = task + } + keys := maps.SortKeys(tasksCreateMap) + for _, key := range keys { + task := tasksCreateMap[key] if t.gcFunc != nil { if t.gcFunc(task) { - delete(t.indexer, task.Key()) - t.tasks = append(t.tasks[0:idx], t.tasks[idx+1:]...) - t.reset() - return false + t.delete(t.tasks[task.Key()]) + } + } + if limit.NotLess(task.EstimateLimit()) { + if t.filterFunc != nil { + if t.filterFunc(task) { + return task + } + } else { + return task } } - return true - } - return false -} - -func (t *GfSpTQueueWithLimit) reset() { - t.indexer = make(map[coretask.TKey]int) - for i, task := range t.tasks { - t.indexer[task.Key()] = i } + return nil } // SetFilterTaskStrategy sets the callback func to filter task for popping or topping. 
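
GfSpTQueueWithLimit gets the same map-based treatment; the only difference is candidate selection: topByLimit walks tasks oldest-first and returns the first one whose EstimateLimit() fits under the caller's free resources (the limit.NotLess(...) check). A rough sketch of that selection rule follows, using a plain int64 memory budget as a stand-in for the repo's corercmgr.Limit interface; the stand-in is an assumption made for illustration.

```go
package main

import (
	"fmt"
	"sort"
)

// limitTask is a toy stand-in for coretask.Task with a resource estimate.
type limitTask struct {
	key        string
	createTime int64
	needMemory int64 // stand-in for EstimateLimit()
}

// popByLimit removes and returns the oldest task whose estimated demand
// fits under the available budget, mimicking PopByLimit's selection rule.
func popByLimit(tasks map[string]*limitTask, available int64) *limitTask {
	ordered := make([]*limitTask, 0, len(tasks))
	for _, t := range tasks {
		ordered = append(ordered, t)
	}
	sort.Slice(ordered, func(i, j int) bool { return ordered[i].createTime < ordered[j].createTime })
	for _, t := range ordered {
		if t.needMemory <= available { // plays the role of limit.NotLess(task.EstimateLimit())
			delete(tasks, t.key)
			return t
		}
	}
	return nil
}

func main() {
	tasks := map[string]*limitTask{
		"replicate:a": {key: "replicate:a", createTime: 1, needMemory: 64 << 20},
		"replicate:b": {key: "replicate:b", createTime: 2, needMemory: 16 << 20},
	}
	// With only 32MB free, the older 64MB task is skipped and the 16MB one pops.
	if t := popByLimit(tasks, 32<<20); t != nil {
		fmt.Println("popped:", t.key) // popped: replicate:b
	}
}
```

The effect is that an oversized task never blocks smaller ones: PopByLimit skips whatever does not fit right now and leaves it queued for a later, larger budget.
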
diff --git a/base/types/gfsptask/task_key.go b/base/types/gfsptask/task_key.go index 45b7eb15a..e68b11c2a 100644 --- a/base/types/gfsptask/task_key.go +++ b/base/types/gfsptask/task_key.go @@ -28,60 +28,69 @@ var ( ) func GfSpCreateBucketApprovalTaskKey(bucket string) task.TKey { - return task.TKey(KeyPrefixGfSpCreateBucketApprovalTask + CombineKey(bucket)) + return task.TKey(KeyPrefixGfSpCreateBucketApprovalTask + CombineKey("bucket:"+bucket)) } func GfSpCreateObjectApprovalTaskKey(bucket, object string) task.TKey { - return task.TKey(KeyPrefixGfSpCreateObjectApprovalTask + CombineKey(bucket, object)) + return task.TKey(KeyPrefixGfSpCreateObjectApprovalTask + + CombineKey("bucket:"+bucket, "object:"+object)) } func GfSpReplicatePieceApprovalTaskKey(bucket, object, id string) task.TKey { - return task.TKey(KeyPrefixGfSpReplicatePieceApprovalTask + CombineKey(bucket, object, id)) + return task.TKey(KeyPrefixGfSpReplicatePieceApprovalTask + + CombineKey("bucket:"+bucket, "object:"+object, "id:"+id)) } func GfSpDownloadObjectTaskKey(bucket, object, id string, low, high int64) task.TKey { - return task.TKey(KeyPrefixGfSpDownloadObjectTask + CombineKey(bucket, object, id, - "low:"+fmt.Sprint(low), "high:"+fmt.Sprint(high))) + return task.TKey(KeyPrefixGfSpDownloadObjectTask + + CombineKey("bucket:"+bucket, "object:"+object, "id:"+id, + "low:"+fmt.Sprint(low), "high:"+fmt.Sprint(high))) } func GfSpDownloadPieceTaskKey(bucket, object, pieceKey string, pieceOffset, pieceLength uint64) task.TKey { - return task.TKey(KeyPrefixGfSpDownloadPieceTask + CombineKey(bucket, object, pieceKey, - "offset:"+fmt.Sprint(pieceOffset), "length:"+fmt.Sprint(pieceLength))) + return task.TKey(KeyPrefixGfSpDownloadPieceTask + + CombineKey("bucket:"+bucket, "object:"+object, "piece:"+pieceKey, + "offset:"+fmt.Sprint(pieceOffset), "length:"+fmt.Sprint(pieceLength))) } func GfSpChallengePieceTaskKey(bucket, object, id string, sIdx uint32, rIdx int32, user string) task.TKey { - return task.TKey(KeyPrefixGfSpChallengeTask + CombineKey(bucket, object, id, - "sIdx:"+fmt.Sprint(sIdx), "rIdx:"+fmt.Sprint(rIdx), user)) + return task.TKey(KeyPrefixGfSpChallengeTask + + CombineKey("bucket:"+bucket, "object:"+object, "id:"+id, + "sIdx:"+fmt.Sprint(sIdx), "rIdx:"+fmt.Sprint(rIdx), "user:"+user)) } func GfSpUploadObjectTaskKey(bucket, object, id string) task.TKey { - return task.TKey(KeyPrefixGfSpUploadObjectTask + CombineKey(bucket, object, id)) + return task.TKey(KeyPrefixGfSpUploadObjectTask + + CombineKey("bucket:"+bucket, "object:"+object, "id:"+id)) } func GfSpReplicatePieceTaskKey(bucket, object, id string) task.TKey { - return task.TKey(KeyPrefixGfSpReplicatePieceTask + CombineKey(bucket, object, id)) + return task.TKey(KeyPrefixGfSpReplicatePieceTask + + CombineKey("bucket:"+bucket, "object:"+object, "id:"+id)) } func GfSpSealObjectTaskKey(bucket, object, id string) task.TKey { - return task.TKey(KeyPrefixGfSpSealObjectTask + CombineKey(bucket, object, id)) + return task.TKey(KeyPrefixGfSpSealObjectTask + + CombineKey("bucket:"+bucket, "object:"+object, "id:"+id)) } func GfSpReceivePieceTaskKey(bucket, object, id string, rIdx uint32, pIdx int32) task.TKey { - return task.TKey(KeyPrefixGfSpReceivePieceTask + CombineKey(bucket, object, id, - "rIdx:"+fmt.Sprint(rIdx), "pIdx:"+fmt.Sprint(pIdx))) + return task.TKey(KeyPrefixGfSpReceivePieceTask + + CombineKey("bucket:"+bucket, "object:"+object, "id:"+id, + "rIdx:"+fmt.Sprint(rIdx), "pIdx:"+fmt.Sprint(pIdx))) } func GfSpGCObjectTaskKey(start, end uint64, time int64) task.TKey { return
task.TKey(KeyPrefixGfSpGCObjectTask + CombineKey( - fmt.Sprint(start), fmt.Sprint(end), fmt.Sprint(time))) + "start:"+fmt.Sprint(start), "end:"+fmt.Sprint(end), "time:"+fmt.Sprint(time))) } func GfSpGCZombiePieceTaskKey(time int64) task.TKey { - return task.TKey(KeyPrefixGfSpGCZombiePieceTask + CombineKey(fmt.Sprint(time))) + return task.TKey(KeyPrefixGfSpGCZombiePieceTask + CombineKey("time:"+fmt.Sprint(time))) } func GfSpGfSpGCMetaTaskKey(time int64) task.TKey { - return task.TKey(KeyPrefixGfSpGfSpGCMetaTask + CombineKey(fmt.Sprint(time))) + return task.TKey(KeyPrefixGfSpGfSpGCMetaTask + CombineKey("time:"+fmt.Sprint(time))) } func CombineKey(field ...string) string {
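The rewritten keys label every field ("bucket:", "object:", "id:", ...) instead of concatenating raw values. Besides making keys self-describing in logs, labeled fields remove a class of collisions: if CombineKey simply joins its fields with a separator (an assumption for this illustration, since its body is not shown here), two different argument tuples can otherwise collapse into the same key.

// Hypothetical illustration of why field prefixes matter; combineKey is a
// stand-in for CombineKey and is assumed to join fields with "-".
package main

import (
	"fmt"
	"strings"
)

func combineKey(fields ...string) string {
	return strings.Join(fields, "-")
}

func main() {
	// Distinct (bucket, object) pairs, identical unlabeled keys:
	fmt.Println(combineKey("a-b", "c")) // "a-b-c"
	fmt.Println(combineKey("a", "b-c")) // "a-b-c"

	// Labeled fields keep them distinct and readable in logs:
	fmt.Println(combineKey("bucket:a-b", "object:c")) // "bucket:a-b-object:c"
	fmt.Println(combineKey("bucket:a", "object:b-c")) // "bucket:a-object:b-c"
}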
diff --git a/modular/approver/approve_task.go b/modular/approver/approve_task.go index 9df5f1de1..e4c60ff77 100644 --- a/modular/approver/approve_task.go +++ b/modular/approver/approve_task.go @@ -17,7 +17,6 @@ var ( ErrDanglingPointer = gfsperrors.Register(module.ApprovalModularName, http.StatusBadRequest, 10001, "OoooH.... request lost") ErrExceedBucketNumber = gfsperrors.Register(module.ApprovalModularName, http.StatusNotAcceptable, 10002, "account buckets exceed the limit") ErrSigner = gfsperrors.Register(module.ApprovalModularName, http.StatusInternalServerError, 11001, "server slipped away, try again later") - ErrConsensus = gfsperrors.Register(module.ApprovalModularName, http.StatusInternalServerError, 15001, "server slipped away, try again later") ) func (a *ApprovalModular) PreCreateBucketApproval(ctx context.Context, task coretask.ApprovalCreateBucketTask) error { @@ -71,13 +70,9 @@ func (a *ApprovalModular) HandleCreateBucketApprovalTask(ctx context.Context, ta // begin to sign the new approval task startQueryChain := time.Now() - currentHeight, err = a.baseApp.Consensus().CurrentHeight(ctx) + currentHeight = a.GetCurrentBlockHeight() metrics.GnfdChainHistogram.WithLabelValues("query_current_height_in_create_bucket_approval"). Observe(time.Since(startQueryChain).Seconds()) - if err != nil { - log.CtxErrorw(ctx, "failed to get current height", "error", err) - return false, ErrConsensus - } task.SetExpiredHeight(currentHeight + a.bucketApprovalTimeoutHeight) startSignApproval := time.Now() signature, err = a.baseApp.GfSpClient().SignCreateBucketApproval(ctx, task.GetCreateBucketInfo()) @@ -139,13 +134,9 @@ func (a *ApprovalModular) HandleCreateObjectApprovalTask(ctx context.Context, ta // begin to sign the new approval task startQueryChain := time.Now() - currentHeight, err = a.baseApp.Consensus().CurrentHeight(ctx) + currentHeight = a.GetCurrentBlockHeight() metrics.GnfdChainHistogram.WithLabelValues("query_current_height_in_create_object_approval"). Observe(time.Since(startQueryChain).Seconds()) - if err != nil { - log.CtxErrorw(ctx, "failed to get current height", "error", err) - return false, ErrConsensus - } task.SetExpiredHeight(currentHeight + a.objectApprovalTimeoutHeight) startSignApproval := time.Now() signature, err = a.baseApp.GfSpClient().SignCreateObjectApproval(ctx, task.GetCreateObjectInfo()) diff --git a/modular/approver/approver.go b/modular/approver/approver.go index 5759f502c..afb330a5a 100644 --- a/modular/approver/approver.go +++ b/modular/approver/approver.go @@ -2,6 +2,8 @@ package approver import ( "context" + "sync/atomic" + "time" "github.com/bnb-chain/greenfield-storage-provider/base/gfspapp" "github.com/bnb-chain/greenfield-storage-provider/core/module" @@ -11,6 +13,12 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/pkg/log" ) +const ( + DefaultBlockInterval = 2 + + DefaultApprovalExpiredTimeout = int64(DefaultBlockInterval * 20) +) + var _ module.Approver = &ApprovalModular{} type ApprovalModular struct { @@ -19,6 +27,7 @@ type ApprovalModular struct { bucketQueue taskqueue.TQueueOnStrategy objectQueue taskqueue.TQueueOnStrategy + currentBlockHeight uint64 // defines the max bucket number per account, approver refuses the ask approval // request if account own the bucket number greater the value accountBucketNumber int64 @@ -40,6 +49,7 @@ func (a *ApprovalModular) Start(ctx context.Context) error { return err } a.scope = scope + go a.eventLoop(ctx) return nil } @@ -64,21 +74,42 @@ func (a *ApprovalModular) ReleaseResource(ctx context.Context, span rcmgr.Resour span.Done() } +func (a *ApprovalModular) eventLoop(ctx context.Context) { + getCurrentBlockHeightTicker := time.NewTicker(time.Duration(DefaultBlockInterval) * time.Second) + for { + select { + case <-ctx.Done(): + return + case <-getCurrentBlockHeightTicker.C: + current, err := a.baseApp.Consensus().CurrentHeight(context.Background()) + if err != nil { + log.CtxErrorw(ctx, "failed to get current block number", "error", err) + } + a.SetCurrentBlockHeight(current) + } + } +} +
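With eventLoop in place, the approval handlers above read a cached height instead of hitting consensus on every request, so the cost of a chain query is paid once per block interval rather than once per approval. The pattern, condensed with hypothetical names, is a polling goroutine plus an atomic, monotonically non-decreasing value:

// Sketch of a polled block-height cache; the CAS loop also tolerates
// multiple writers, though the approver only ever runs one eventLoop,
// so its plain load-then-store is equally safe there.
package sketch

import (
	"context"
	"sync/atomic"
	"time"
)

type heightCache struct {
	height uint64
	query  func(context.Context) (uint64, error) // e.g. Consensus().CurrentHeight
}

func (c *heightCache) get() uint64 { return atomic.LoadUint64(&c.height) }

// set never moves the height backwards, so a slow, stale response
// from the chain cannot overwrite a newer value.
func (c *heightCache) set(h uint64) {
	for {
		cur := atomic.LoadUint64(&c.height)
		if h <= cur || atomic.CompareAndSwapUint64(&c.height, cur, h) {
			return
		}
	}
}

func (c *heightCache) run(ctx context.Context, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if h, err := c.query(ctx); err == nil {
				c.set(h)
			}
		}
	}
}

The trade-off is staleness of up to one block interval, which is acceptable here because the cached height only feeds expiry calculations via SetExpiredHeight, and the same patch widens the margin by raising the approval timeout heights from 10 to 100 blocks.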
// GCApprovalQueue defines the strategy of gc approval queue when the queue is full. // if the approval is expired, it can be deleted. func (a *ApprovalModular) GCApprovalQueue(qTask task.Task) bool { task := qTask.(task.ApprovalTask) - ctx := log.WithValue(context.Background(), log.CtxKeyTask, task.Key().String()) - current, err := a.baseApp.Consensus().CurrentHeight(context.Background()) - if err != nil { - log.CtxErrorw(ctx, "failed to get current height", "error", err) - return false - } - if task.GetExpiredHeight() < current { - log.CtxDebugw(ctx, "expire approval task", "info", task.Info()) + if task.GetCreateTime()+DefaultApprovalExpiredTimeout < time.Now().Unix() { + log.Debugw("expire approval task", "info", task.Info()) return true } - log.CtxDebugw(ctx, "approval task not expired", "current_height", current, - "expired_height", task.GetExpiredHeight()) + log.Debugw("approval task not expired", "expired_height", task.GetExpiredHeight(), + "current_height", a.GetCurrentBlockHeight()) return false } + +func (a *ApprovalModular) GetCurrentBlockHeight() uint64 { + return atomic.LoadUint64(&a.currentBlockHeight) +} + +func (a *ApprovalModular) SetCurrentBlockHeight(height uint64) { + if height <= a.GetCurrentBlockHeight() { + return + } + atomic.StoreUint64(&a.currentBlockHeight, height) +} diff --git a/modular/approver/approver_options.go b/modular/approver/approver_options.go index cce93e354..60de1a678 100644 --- a/modular/approver/approver_options.go +++ b/modular/approver/approver_options.go @@ -12,10 +12,10 @@ const ( DefaultAccountBucketNumber = 100 // DefaultBucketApprovalTimeoutHeight defines the default value of timeout // height for creating bucket approval - DefaultBucketApprovalTimeoutHeight uint64 = 10 + DefaultBucketApprovalTimeoutHeight uint64 = 100 // DefaultObjectApprovalTimeoutHeight defines the default value of timeout // height for creating object approval - DefaultObjectApprovalTimeoutHeight uint64 = 10 + DefaultObjectApprovalTimeoutHeight uint64 = 100 // DefaultCreateBucketApprovalParallel defines the default value of parallel // for approved create bucket per approver DefaultCreateBucketApprovalParallel = 10240 diff --git a/modular/gater/object_handler.go b/modular/gater/object_handler.go index 617140c71..c236a15f1 100644 --- a/modular/gater/object_handler.go +++ b/modular/gater/object_handler.go @@ -10,6 +10,7 @@ import ( "time" "github.com/bnb-chain/greenfield-storage-provider/modular/downloader" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" "github.com/bnb-chain/greenfield/types/s3util" sdk "github.com/cosmos/cosmos-sdk/types" @@ -49,8 +50,10 @@ func (g *GateModular) putObjectHandler(w http.ResponseWriter, r *http.Request) { return } if reqCtx.NeedVerifyAuthorizer() { + startAuthorizerTime := time.Now() authorized, err = g.baseApp.GfSpClient().VerifyAuthorize(reqCtx.Context(), coremodule.AuthOpTypePutObject, reqCtx.Account(), reqCtx.bucketName, reqCtx.objectName) + metrics.PerfUploadTimeHistogram.WithLabelValues("uploader_authorizer").Observe(time.Since(startAuthorizerTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) return @@ -62,7 +65,9 @@ func (g *GateModular) putObjectHandler(w http.ResponseWriter, r *http.Request) { } } + startGetObjectInfoTime := time.Now() objectInfo, err = g.baseApp.Consensus().QueryObjectInfo(reqCtx.Context(), reqCtx.bucketName, reqCtx.objectName) + metrics.PerfUploadTimeHistogram.WithLabelValues("uploader_get_object_info").Observe(time.Since(startGetObjectInfoTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to get object info from consensus", "error", err) err =
ErrConsensus @@ -73,7 +78,9 @@ func (g *GateModular) putObjectHandler(w http.ResponseWriter, r *http.Request) { err = ErrInvalidPayloadSize return } + startGetStorageParamTime := time.Now() params, err = g.baseApp.Consensus().QueryStorageParamsByTimestamp(reqCtx.Context(), objectInfo.GetCreateAt()) + metrics.PerfUploadTimeHistogram.WithLabelValues("uploader_get_storage_param").Observe(time.Since(startGetStorageParamTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to get storage params from consensus", "error", err) err = ErrConsensus From 225ff2c023e2905b1a385245e5e6e368762cc8cf Mon Sep 17 00:00:00 2001 From: will-2012 <117156346+will-2012@users.noreply.github.com> Date: Mon, 12 Jun 2023 21:27:20 +0800 Subject: [PATCH 04/78] build: fix compile warning (#578) --- go.mod | 2 +- go.sum | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index e2680c725..447ff9186 100644 --- a/go.mod +++ b/go.mod @@ -106,7 +106,7 @@ require ( github.com/peterh/liner v1.2.0 // indirect github.com/prometheus/tsdb v0.10.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/rjeczalik/notify v0.9.1 // indirect + github.com/rjeczalik/notify v0.9.3 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/rs/cors v1.8.3 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect diff --git a/go.sum b/go.sum index fc7e2dfb2..47a06faa9 100644 --- a/go.sum +++ b/go.sum @@ -1396,8 +1396,9 @@ github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rjeczalik/notify v0.9.3 h1:6rJAzHTGKXGj76sbRgDiDcYj/HniypXmSJo1SWakZeY= +github.com/rjeczalik/notify v0.9.3/go.mod h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -1864,6 +1865,7 @@ golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From f5c8ae152606a9e3048af4e5132a30cc6b9df776 Mon Sep 17 00:00:00 2001 From: joeycli Date: Tue, 13 Jun 2023 10:12:14 +0800 Subject: [PATCH 05/78] fix: signer panic (#583) --- modular/signer/signer_client.go | 3 --- 1 file 
changed, 3 deletions(-) diff --git a/modular/signer/signer_client.go b/modular/signer/signer_client.go index ba4bccba4..d7b692759 100644 --- a/modular/signer/signer_client.go +++ b/modular/signer/signer_client.go @@ -258,9 +258,6 @@ func (client *GreenfieldChainSignClient) RejectUnSealObject( } client.sealAccNonce = nonce } - if strings.Contains(err.Error(), "Object already sealed") { - return nil, nil - } return nil, ErrRejectUnSealObjectOnChain }
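The deleted branch is a textbook nil-dereference hazard: on a code path where err can be nil, err.Error() panics. The commit message only says "signer panic", so the exact trigger is inferred; a minimal reproduction of the pitfall under that assumption, with illustrative names:

// Reproduces the err.Error()-on-nil pitfall the deleted branch risked.
package main

import (
	"errors"
	"fmt"
	"strings"
)

func broadcastTx(fail bool) error {
	if fail {
		return errors.New("rpc error: Object already sealed")
	}
	return nil // success paths leave err == nil
}

func main() {
	err := broadcastTx(false)

	// Unsafe: panics with a nil-pointer dereference when err == nil.
	//   if strings.Contains(err.Error(), "Object already sealed") { ... }

	// Safe: guard the nil case before inspecting the message.
	if err != nil && strings.Contains(err.Error(), "Object already sealed") {
		fmt.Println("already sealed, treat as success")
	}
}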
From d88eedccbf9d065684026aa916a1a727e9077a16 Mon Sep 17 00:00:00 2001 From: Hao Feng <3140102270@zju.edu.cn> Date: Tue, 13 Jun 2023 15:03:29 +0800 Subject: [PATCH 06/78] feat: add ldfs piecestore (#545) * feat: add ldfs piecestore * feat: add comments in ldfs piecestore --------- Co-authored-by: fenghao --- store/piecestore/storage/const.go | 2 + store/piecestore/storage/ldfs.go | 95 ++++++++++++++++++++++ store/piecestore/storage/object_storage.go | 1 + 3 files changed, 98 insertions(+) create mode 100644 store/piecestore/storage/ldfs.go diff --git a/store/piecestore/storage/const.go b/store/piecestore/storage/const.go index 0473b0067..3a6f6d0f5 100644 --- a/store/piecestore/storage/const.go +++ b/store/piecestore/storage/const.go @@ -10,6 +10,8 @@ const ( MinioStore = "minio" // DiskFileStore defines storage type for file DiskFileStore = "file" + // LdfsStore defines storage type for ldfs + LdfsStore = "ldfs" // MemoryStore defines storage type for memory MemoryStore = "memory" ) diff --git a/store/piecestore/storage/ldfs.go b/store/piecestore/storage/ldfs.go new file mode 100644 index 000000000..8580d8185 --- /dev/null +++ b/store/piecestore/storage/ldfs.go @@ -0,0 +1,95 @@ +package storage + +import ( + "fmt" + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/bnb-chain/greenfield-storage-provider/pkg/log" +) + +var _ ObjectStorage = &ldfsStore{} + +var ( + // re-used ldfs sessions dramatically improve performance + ldfsSessionCache = &SessionCache{ + sessions: map[ObjectStorageConfig]*session.Session{}, + } +) + +type ldfsStore struct { + s3Store +} + +func newLdfsStore(cfg ObjectStorageConfig) (ObjectStorage, error) { + ldfsSession, bucket, err := ldfsSessionCache.newLdfsSession(cfg) + if err != nil { + log.Errorw("failed to new ldfs session", "error", err) + return nil, err + } + log.Infow("new ldfs store succeeds", "bucket", bucket) + return &ldfsStore{s3Store{bucketName: bucket, api: s3.New(ldfsSession)}}, nil +} + +func (sc *SessionCache) newLdfsSession(cfg ObjectStorageConfig) (*session.Session, string, error) { + sc.Lock() + defer sc.Unlock() + + endpoint, bucketName, useSSL, err := parseLdfsBucketURL(cfg.BucketURL) + if err != nil { + log.Errorw("failed to parse ldfs bucket url", "error", err) + return nil, "", err + } + if sess, ok := sc.sessions[cfg]; ok { + return sess, bucketName, nil + } + + // There is no concept of `region` in LDFS + awsConfig := &aws.Config{ + Region: aws.String("ldfs"), + Endpoint: aws.String(endpoint), + DisableSSL: aws.Bool(!useSSL), + S3ForcePathStyle: aws.Bool(true), + HTTPClient: getHTTPClient(cfg.TLSInsecureSkipVerify), + } + // We don't need additional authentication, because we use a + // whitelist to restrict the IPs that can access LDFS. + awsConfig.Credentials = credentials.NewStaticCredentials("ldfs", "ldfs", "") + + sess, err := session.NewSession(awsConfig) + if err != nil { + return nil, "", fmt.Errorf("failed to create ldfs session: %s", err) + } + sc.sessions[cfg] = sess + return sess, bucketName, nil +} + +func (m *ldfsStore) String() string { + return fmt.Sprintf("ldfs://%s/", m.s3Store.bucketName) +} + +func parseLdfsBucketURL(bucketURL string) (string, string, bool, error) { + // 1. parse bucket url + if !strings.Contains(bucketURL, "://") { + bucketURL = fmt.Sprintf("http://%s", bucketURL) + } + uri, err := url.ParseRequestURI(bucketURL) + if err != nil { + return "", "", false, fmt.Errorf("invalid endpoint %s: %s", bucketURL, err) + } + + // 2. check if ldfs uses https + ssl := strings.ToLower(uri.Scheme) == "https" + + // 3. get bucket name + if len(uri.Path) < 2 { + return "", "", false, fmt.Errorf("no bucket name provided in %s", bucketURL) + } + bucketName := strings.Split(uri.Path, "/")[1] + return uri.Host, bucketName, ssl, nil +} diff --git a/store/piecestore/storage/object_storage.go b/store/piecestore/storage/object_storage.go index c0f6123db..3fe54c2c1 100644 --- a/store/piecestore/storage/object_storage.go +++ b/store/piecestore/storage/object_storage.go @@ -24,6 +24,7 @@ var storageMap = map[string]StorageFn{ S3Store: newS3Store, B2Store: newB2Store, MinioStore: newMinioStore, + LdfsStore: newLdfsStore, DiskFileStore: newDiskFileStore, MemoryStore: newMemoryStore, } From 213ecd816e5019fa4ddee3ee4e7c0c40b8764765 Mon Sep 17 00:00:00 2001 From: joeycli Date: Tue, 13 Jun 2023 19:24:05 +0800 Subject: [PATCH 07/78] feat: add perf metrics (#591) * feat: add perf metrics * fix: golang ci lint error --- base/gfspapp/authorizer_server.go | 4 ++++ base/gfspapp/download_server.go | 5 ++++ base/gfspapp/receive_server.go | 5 ++++ base/gfspapp/upload_server.go | 5 ++++ base/gfspclient/authorizer.go | 7 ++++++ base/gfspclient/downloader.go | 6 +++++ base/gfspclient/receiver.go | 6 +++++ base/gfspclient/uploader.go | 1 + modular/authorizer/authorizer.go | 27 ++++++++++++++++++++++ modular/downloader/download_task.go | 22 +++++++++++++++++- modular/executor/execute_replicate.go | 2 ++ modular/executor/execute_task.go | 3 +++ modular/gater/admin_handler.go | 27 ++++++++++++++++++++++ modular/gater/object_handler.go | 20 ++++++++++++++++ modular/manager/manage_task.go | 5 ++-- modular/manager/manager.go | 1 + modular/receiver/receive_task.go | 33 +++++++++++++++++++++++++++ pkg/metrics/metric_items.go | 24 +++++++++++++++++++ store/piecestore/storage/ldfs.go | 2 +- 19 files changed, 201 insertions(+), 4 deletions(-) diff --git a/base/gfspapp/authorizer_server.go b/base/gfspapp/authorizer_server.go index ce6202e85..97f728661 100644 --- a/base/gfspapp/authorizer_server.go +++ b/base/gfspapp/authorizer_server.go @@ -2,11 +2,13 @@ package gfspapp import ( "context" + "time" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfspserver" coremodule "github.com/bnb-chain/greenfield-storage-provider/core/module" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" ) var _ gfspserver.GfSpAuthorizationServiceServer = &GfSpBaseApp{} func (g *GfSpBaseApp) GfSpVerifyAuthorize(ctx context.Context, req *gfspserver.G ctx = log.WithValue(ctx, log.CtxKeyBucketName, req.GetBucketName()) ctx = log.WithValue(ctx, log.CtxKeyObjectName, req.GetObjectName())
log.CtxDebugw(ctx, "begin to authorize", "user", req.GetUserAccount(), "auth_type", req.GetAuthType()) + startTime := time.Now() allow, err := g.authorizer.VerifyAuthorize(ctx, coremodule.AuthOpType(req.GetAuthType()), req.GetUserAccount(), req.GetBucketName(), req.GetObjectName()) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_total_time").Observe(time.Since(startTime).Seconds()) log.CtxDebugw(ctx, "finish to authorize", "user", req.GetUserAccount(), "auth_type", req.GetAuthType(), "allow", allow, "error", err) return &gfspserver.GfSpAuthorizeResponse{ diff --git a/base/gfspapp/download_server.go b/base/gfspapp/download_server.go index 75a4db775..1cc7c0505 100644 --- a/base/gfspapp/download_server.go +++ b/base/gfspapp/download_server.go @@ -3,6 +3,7 @@ package gfspapp import ( "context" "net/http" + "time" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfspserver" @@ -65,6 +66,8 @@ func (g *GfSpBaseApp) OnDownloadObjectTask(ctx context.Context, downloadObjectTa func (g *GfSpBaseApp) GfSpDownloadPiece(ctx context.Context, req *gfspserver.GfSpDownloadPieceRequest) ( *gfspserver.GfSpDownloadPieceResponse, error) { downloadPieceTask := req.GetDownloadPieceTask() + startTime := time.Now() + defer metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_server_total_time").Observe(time.Since(startTime).Seconds()) if downloadPieceTask == nil { log.Error("failed to download piece due to task pointer dangling") return &gfspserver.GfSpDownloadPieceResponse{Err: ErrDownloadTaskDangling}, nil @@ -108,6 +111,8 @@ func (g *GfSpBaseApp) OnDownloadPieceTask(ctx context.Context, downloadPieceTask func (g *GfSpBaseApp) GfSpGetChallengeInfo(ctx context.Context, req *gfspserver.GfSpGetChallengeInfoRequest) ( *gfspserver.GfSpGetChallengeInfoResponse, error) { + startTime := time.Now() + defer metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_server_total_time").Observe(time.Since(startTime).Seconds()) challengePieceTask := req.GetChallengePieceTask() if challengePieceTask == nil { log.CtxError(ctx, "failed to challenge piece due to task pointer dangling") diff --git a/base/gfspapp/receive_server.go b/base/gfspapp/receive_server.go index b0c6a15bd..7e3ac01bf 100644 --- a/base/gfspapp/receive_server.go +++ b/base/gfspapp/receive_server.go @@ -3,6 +3,7 @@ package gfspapp import ( "context" "net/http" + "time" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfspserver" @@ -19,6 +20,8 @@ var ( func (g *GfSpBaseApp) GfSpReplicatePiece(ctx context.Context, req *gfspserver.GfSpReplicatePieceRequest) ( *gfspserver.GfSpReplicatePieceResponse, error) { + startTime := time.Now() + defer metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_total_time").Observe(time.Since(startTime).Seconds()) task := req.GetReceivePieceTask() if task == nil { log.Error("failed to receive piece due to task pointer dangling") @@ -44,6 +47,8 @@ func (g *GfSpBaseApp) GfSpReplicatePiece(ctx context.Context, req *gfspserver.Gf func (g *GfSpBaseApp) GfSpDoneReplicatePiece(ctx context.Context, req *gfspserver.GfSpDoneReplicatePieceRequest) ( *gfspserver.GfSpDoneReplicatePieceResponse, error) { + startTime := time.Now() + defer metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_done_server_total_time").Observe(time.Since(startTime).Seconds()) task := req.GetReceivePieceTask() if task == nil { 
log.Error("failed to done receive piece due to task pointer dangling") diff --git a/base/gfspapp/upload_server.go b/base/gfspapp/upload_server.go index ec14715f2..62aa07ffb 100644 --- a/base/gfspapp/upload_server.go +++ b/base/gfspapp/upload_server.go @@ -52,13 +52,18 @@ func (g *GfSpBaseApp) GfSpUploadObject(stream gfspserver.GfSpUploadService_GfSpU if err != nil { resp.Err = gfsperrors.MakeGfSpError(err) } + + closeTime := time.Now() err = stream.SendAndClose(resp) + metrics.PerfUploadTimeHistogram.WithLabelValues("server_send_and_close_time").Observe(time.Since(closeTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to close upload object stream", "error", err) } }() go func() { + serverStartTime := time.Now() + defer metrics.PerfUploadTimeHistogram.WithLabelValues("server_total_time").Observe(time.Since(serverStartTime).Seconds()) init := false for { select { diff --git a/base/gfspclient/authorizer.go b/base/gfspclient/authorizer.go index 582e7eb74..9714e7fbb 100644 --- a/base/gfspclient/authorizer.go +++ b/base/gfspclient/authorizer.go @@ -2,15 +2,20 @@ package gfspclient import ( "context" + "time" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfspserver" coremodule "github.com/bnb-chain/greenfield-storage-provider/core/module" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" "google.golang.org/grpc" ) func (s *GfSpClient) VerifyAuthorize(ctx context.Context, auth coremodule.AuthOpType, account, bucket, object string) (bool, error) { + startTime := time.Now() + defer metrics.PerfAuthTimeHistogram.WithLabelValues("auth_client_total_time").Observe(time.Since(startTime).Seconds()) conn, connErr := s.Connection(ctx, s.authorizerEndpoint) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_client_create_conn_time").Observe(time.Since(startTime).Seconds()) if connErr != nil { log.CtxErrorw(ctx, "client failed to connect authorizer", "error", connErr) return false, ErrRpcUnknown @@ -22,7 +27,9 @@ func (s *GfSpClient) VerifyAuthorize(ctx context.Context, auth coremodule.AuthOp BucketName: bucket, ObjectName: object, } + startRequestTime := time.Now() resp, err := gfspserver.NewGfSpAuthorizationServiceClient(conn).GfSpVerifyAuthorize(ctx, req) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_client_network_time").Observe(time.Since(startRequestTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "client failed to verify authorize", "error", err) return false, ErrRpcUnknown diff --git a/base/gfspclient/downloader.go b/base/gfspclient/downloader.go index c8646552a..f7892e3b0 100644 --- a/base/gfspclient/downloader.go +++ b/base/gfspclient/downloader.go @@ -2,7 +2,9 @@ package gfspclient import ( "context" + "time" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" "google.golang.org/grpc" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfspserver" @@ -44,7 +46,9 @@ func (s *GfSpClient) GetPiece(ctx context.Context, downloadPieceTask coretask.Do req := &gfspserver.GfSpDownloadPieceRequest{ DownloadPieceTask: downloadPieceTask.(*gfsptask.GfSpDownloadPieceTask), } + startTime := time.Now() resp, err := gfspserver.NewGfSpDownloadServiceClient(conn).GfSpDownloadPiece(ctx, req) + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_client_total_time").Observe(time.Since(startTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "client failed to download piece", "error", err) return nil, ErrRpcUnknown @@ -66,7 +70,9 @@ func (s *GfSpClient) GetChallengeInfo(ctx 
context.Context, challengePieceTask co req := &gfspserver.GfSpGetChallengeInfoRequest{ ChallengePieceTask: challengePieceTask.(*gfsptask.GfSpChallengePieceTask), } + startTime := time.Now() resp, err := gfspserver.NewGfSpDownloadServiceClient(conn).GfSpGetChallengeInfo(ctx, req) + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_client_total_time").Observe(time.Since(startTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "client failed to get challenge piece info", "error", err) return nil, nil, nil, ErrRpcUnknown diff --git a/base/gfspclient/receiver.go b/base/gfspclient/receiver.go index 4570b5552..33bd015eb 100644 --- a/base/gfspclient/receiver.go +++ b/base/gfspclient/receiver.go @@ -2,7 +2,9 @@ package gfspclient import ( "context" + "time" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" "google.golang.org/grpc" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfspserver" @@ -23,7 +25,9 @@ func (s *GfSpClient) ReplicatePiece(ctx context.Context, task coretask.ReceivePi ReceivePieceTask: task.(*gfsptask.GfSpReceivePieceTask), PieceData: data, } + startTime := time.Now() resp, err := gfspserver.NewGfSpReceiveServiceClient(conn).GfSpReplicatePiece(ctx, req) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_client_total_time").Observe(time.Since(startTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "client failed to replicate piece", "error", err) return ErrRpcUnknown @@ -45,7 +49,9 @@ func (s *GfSpClient) DoneReplicatePiece(ctx context.Context, task coretask.Recei req := &gfspserver.GfSpDoneReplicatePieceRequest{ ReceivePieceTask: task.(*gfsptask.GfSpReceivePieceTask), } + startTime := time.Now() resp, err := gfspserver.NewGfSpReceiveServiceClient(conn).GfSpDoneReplicatePiece(ctx, req) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_done_client_total_time").Observe(time.Since(startTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "client failed to done replicate piece", "error", err) return nil, nil, ErrRpcUnknown diff --git a/base/gfspclient/uploader.go b/base/gfspclient/uploader.go index be89dc82a..26aaaf675 100644 --- a/base/gfspclient/uploader.go +++ b/base/gfspclient/uploader.go @@ -29,6 +29,7 @@ func (s *GfSpClient) UploadObject(ctx context.Context, task coretask.UploadObjec } else { log.CtxDebugw(ctx, "finished to send payload data", "send_size", sendSize) } + metrics.PerfUploadTimeHistogram.WithLabelValues("client_total_time").Observe(time.Since(startConnectUploader).Seconds()) }() startGetUploaderClient := time.Now() client, err := gfspserver.NewGfSpUploadServiceClient(conn).GfSpUploadObject(ctx) diff --git a/modular/authorizer/authorizer.go b/modular/authorizer/authorizer.go index 0900293a8..767464846 100644 --- a/modular/authorizer/authorizer.go +++ b/modular/authorizer/authorizer.go @@ -16,6 +16,7 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/core/rcmgr" "github.com/bnb-chain/greenfield-storage-provider/core/spdb" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" paymenttypes "github.com/bnb-chain/greenfield/x/payment/types" storagetypes "github.com/bnb-chain/greenfield/x/storage/types" ) @@ -165,7 +166,9 @@ func (a *AuthorizeModular) VerifyAuthorize( authType coremodule.AuthOpType, account, bucket, object string) ( bool, error) { + startTime := time.Now() has, err := a.baseApp.Consensus().HasAccount(ctx, account) + 
metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_check_has_account_time").Observe(time.Since(startTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to check account from consensus", "error", err) return false, ErrConsensus @@ -177,7 +180,9 @@ func (a *AuthorizeModular) VerifyAuthorize( switch authType { case coremodule.AuthOpAskCreateBucketApproval: + queryTime := time.Now() bucketInfo, _ := a.baseApp.Consensus().QueryBucketInfo(ctx, bucket) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_create_bucket_approval_query_bucket_time").Observe(time.Since(queryTime).Seconds()) if bucketInfo != nil { log.CtxErrorw(ctx, "failed to verify authorize of asking create bucket "+ "approval, bucket repeated", "bucket", bucket) @@ -185,7 +190,9 @@ func (a *AuthorizeModular) VerifyAuthorize( } return true, nil case coremodule.AuthOpAskCreateObjectApproval: + queryTime := time.Now() bucketInfo, objectInfo, _ := a.baseApp.Consensus().QueryBucketInfoAndObjectInfo(ctx, bucket, object) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_create_object_approval_query_bucket_object_time").Observe(time.Since(queryTime).Seconds()) if bucketInfo == nil { log.CtxErrorw(ctx, "failed to verify authorize of asking create object "+ "approval, no such bucket to ask create object approval", "bucket", bucket, "object", object) @@ -198,7 +205,9 @@ func (a *AuthorizeModular) VerifyAuthorize( } return true, nil case coremodule.AuthOpTypePutObject: + queryTime := time.Now() bucketInfo, objectInfo, err := a.baseApp.Consensus().QueryBucketInfoAndObjectInfo(ctx, bucket, object) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_put_object_query_bucket_object_time").Observe(time.Since(queryTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get bucket and object info from consensus", "error", err) // refer to https://github.com/bnb-chain/greenfield/blob/master/x/storage/types/errors.go @@ -219,14 +228,18 @@ func (a *AuthorizeModular) VerifyAuthorize( log.CtxErrorw(ctx, "object state is not sealed", "state", objectInfo.GetObjectStatus()) return false, ErrNotCreatedState } + permissionTime := time.Now() allow, err := a.baseApp.Consensus().VerifyPutObjectPermission(ctx, account, bucket, object) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_put_object_verify_permission_time").Observe(time.Since(permissionTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to verify put object permission from consensus", "error", err) return false, ErrConsensus } return allow, nil case coremodule.AuthOpTypeGetUploadingState: + queryTime := time.Now() bucketInfo, objectInfo, err := a.baseApp.Consensus().QueryBucketInfoAndObjectInfo(ctx, bucket, object) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_get_object_process_query_bucket_object_time").Observe(time.Since(queryTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get bucket and object info from consensus", "error", err) // refer to https://github.com/bnb-chain/greenfield/blob/master/x/storage/types/errors.go @@ -247,14 +260,18 @@ func (a *AuthorizeModular) VerifyAuthorize( log.CtxErrorw(ctx, "object state is not created", "state", objectInfo.GetObjectStatus()) return false, ErrNotCreatedState } + permissionTime := time.Now() allow, err := a.baseApp.Consensus().VerifyPutObjectPermission(ctx, account, bucket, object) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_get_object_process_verify_permission_time").Observe(time.Since(permissionTime).Seconds()) if err != nil { 
log.CtxErrorw(ctx, "failed to verify put object permission from consensus", "error", err) return false, ErrConsensus } return allow, nil case coremodule.AuthOpTypeGetObject: + queryTime := time.Now() bucketInfo, objectInfo, err := a.baseApp.Consensus().QueryBucketInfoAndObjectInfo(ctx, bucket, object) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_get_object_query_bucket_object_time").Observe(time.Since(queryTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get bucket and object info from consensus", "error", err) // refer to https://github.com/bnb-chain/greenfield/blob/master/x/storage/types/errors.go @@ -275,7 +292,9 @@ func (a *AuthorizeModular) VerifyAuthorize( log.CtxErrorw(ctx, "object state is not sealed", "state", objectInfo.GetObjectStatus()) return false, ErrNotSealedState } + streamTime := time.Now() streamRecord, err := a.baseApp.Consensus().QueryPaymentStreamRecord(ctx, bucketInfo.GetPaymentAddress()) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_get_object_query_stream_time").Observe(time.Since(streamTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to query payment stream record from consensus", "error", err) return false, ErrConsensus @@ -284,14 +303,18 @@ func (a *AuthorizeModular) VerifyAuthorize( log.CtxErrorw(ctx, "failed to check payment due to account status is not active", "status", streamRecord.Status) return false, ErrPaymentState } + permissionTime := time.Now() allow, err := a.baseApp.Consensus().VerifyGetObjectPermission(ctx, account, bucket, object) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_get_object_verify_permission_time").Observe(time.Since(permissionTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get bucket and object info from consensus", "error", err) return false, ErrConsensus } return allow, nil case coremodule.AuthOpTypeGetBucketQuota, coremodule.AuthOpTypeListBucketReadRecord: + queryTime := time.Now() bucketInfo, err := a.baseApp.Consensus().QueryBucketInfo(ctx, bucket) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_get_bucket_quota_query_bucket_time").Observe(time.Since(queryTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get bucket info from consensus", "error", err) // refer to https://github.com/bnb-chain/greenfield/blob/master/x/storage/types/errors.go @@ -313,7 +336,9 @@ func (a *AuthorizeModular) VerifyAuthorize( return true, nil case coremodule.AuthOpTypeGetChallengePieceInfo: challengeIsFromValidator := false + queryTime := time.Now() validators, err := a.baseApp.Consensus().ListBondedValidators(ctx) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_challenge_query_validator_time").Observe(time.Since(queryTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to list validator from consensus", "error", err) return false, ErrConsensus @@ -329,7 +354,9 @@ func (a *AuthorizeModular) VerifyAuthorize( "actual_challenge_address", account) return false, ErrNoPermission } + queryTime = time.Now() bucketInfo, objectInfo, err := a.baseApp.Consensus().QueryBucketInfoAndObjectInfo(ctx, bucket, object) + metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_challenge_query_bucket_object_time").Observe(time.Since(queryTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get object info from consensus", "error", err) // refer to https://github.com/bnb-chain/greenfield/blob/master/x/storage/types/errors.go diff --git a/modular/downloader/download_task.go b/modular/downloader/download_task.go index 
552a72591..f0145c6e5 100644 --- a/modular/downloader/download_task.go +++ b/modular/downloader/download_task.go @@ -4,6 +4,7 @@ import ( "context" "errors" "net/http" + "time" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" "github.com/bnb-chain/greenfield-storage-provider/core/module" @@ -13,6 +14,7 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/core/task" "github.com/bnb-chain/greenfield-storage-provider/core/taskqueue" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" "github.com/bnb-chain/greenfield-storage-provider/store/sqldb" storagetypes "github.com/bnb-chain/greenfield/x/storage/types" ) @@ -176,6 +178,7 @@ func (d *DownloadModular) PreDownloadPiece(ctx context.Context, downloadPieceTas return ErrRepeatedTask } + checkQuotaTime := time.Now() if downloadPieceTask.GetEnableCheck() { if err := d.baseApp.GfSpDB().CheckQuotaAndAddReadRecord( &spdb.ReadRecord{ @@ -191,6 +194,7 @@ func (d *DownloadModular) PreDownloadPiece(ctx context.Context, downloadPieceTas ReadQuotaSize: downloadPieceTask.GetBucketInfo().GetChargedReadQuota() + d.bucketFreeQuota, }, ); err != nil { + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_check_quota_time").Observe(time.Since(checkQuotaTime).Seconds()) log.CtxErrorw(ctx, "failed to check bucket quota", "error", err) if errors.Is(err, sqldb.ErrCheckQuotaEnough) { return ErrExceedBucketQuota @@ -198,6 +202,7 @@ func (d *DownloadModular) PreDownloadPiece(ctx context.Context, downloadPieceTas // ignore the access db error, it is the system's inner error, will be let the request go. } } + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_check_quota_time").Observe(time.Since(checkQuotaTime).Seconds()) // report the task to the manager for monitor the download piece task d.baseApp.GfSpClient().ReportTask(ctx, downloadPieceTask) return nil @@ -215,17 +220,24 @@ func (d *DownloadModular) HandleDownloadPieceTask(ctx context.Context, downloadP } log.CtxDebugw(ctx, downloadPieceTask.Info()) }() + + pushTime := time.Now() if err = d.downloadQueue.Push(downloadPieceTask); err != nil { + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_push_time").Observe(time.Since(pushTime).Seconds()) log.CtxErrorw(ctx, "failed to push download queue", "error", err) return nil, err } + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_push_time").Observe(time.Since(pushTime).Seconds()) defer d.downloadQueue.PopByKey(downloadPieceTask.Key()) + getPieceTime := time.Now() if pieceData, err = d.baseApp.PieceStore().GetPiece(ctx, downloadPieceTask.GetPieceKey(), int64(downloadPieceTask.GetPieceOffset()), int64(downloadPieceTask.GetPieceLength())); err != nil { + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_get_piece_time").Observe(time.Since(getPieceTime).Seconds()) log.CtxErrorw(ctx, "failed to get piece data from piece store", "task_info", downloadPieceTask.Info(), "error", err) return nil, ErrPieceStore } + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_get_piece_time").Observe(time.Since(getPieceTime).Seconds()) return pieceData, nil } @@ -241,7 +253,7 @@ func (d *DownloadModular) PreChallengePiece(ctx context.Context, downloadPieceTa log.CtxErrorw(ctx, "failed to pre challenge piece due to object unsealed") return ErrObjectUnsealed } - d.baseApp.GfSpClient().ReportTask(ctx, downloadPieceTask) + go d.baseApp.GfSpClient().ReportTask(ctx, downloadPieceTask) return nil } @@ -258,16 +270,22
@@ func (d *DownloadModular) HandleChallengePiece(ctx context.Context, downloadPiec } log.CtxDebugw(ctx, downloadPieceTask.Info()) }() + pushTime := time.Now() if err = d.challengeQueue.Push(downloadPieceTask); err != nil { log.CtxErrorw(ctx, "failed to push challenge piece queue", "error", err) + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_push_time").Observe(time.Since(pushTime).Seconds()) return nil, nil, nil, err } + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_push_time").Observe(time.Since(pushTime).Seconds()) + defer d.challengeQueue.PopByKey(downloadPieceTask.Key()) pieceKey := d.baseApp.PieceOp().ChallengePieceKey( downloadPieceTask.GetObjectInfo().Id.Uint64(), downloadPieceTask.GetSegmentIdx(), downloadPieceTask.GetRedundancyIdx()) + getIntegrityTime := time.Now() integrity, err = d.baseApp.GfSpDB().GetObjectIntegrity(downloadPieceTask.GetObjectInfo().Id.Uint64()) + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_get_integrity_time").Observe(time.Since(getIntegrityTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get integrity hash", "error", err) return nil, nil, nil, ErrGfSpDB @@ -276,7 +294,9 @@ func (d *DownloadModular) HandleChallengePiece(ctx context.Context, downloadPiec log.CtxErrorw(ctx, "failed to get challenge info due to segment index wrong") return nil, nil, nil, ErrNoSuchPiece } + getPieceTime := time.Now() data, err = d.baseApp.PieceStore().GetPiece(ctx, pieceKey, 0, -1) + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_get_piece_time").Observe(time.Since(getPieceTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get piece data", "error", err) return nil, nil, nil, ErrPieceStore diff --git a/modular/executor/execute_replicate.go b/modular/executor/execute_replicate.go index 385ee4fdb..bd83e9ce4 100644 --- a/modular/executor/execute_replicate.go +++ b/modular/executor/execute_replicate.go @@ -27,8 +27,10 @@ func (e *ExecuteModular) HandleReplicatePieceTask(ctx context.Context, task core err error approvals []*gfsptask.GfSpReplicatePieceApprovalTask ) + startReplicateTime := time.Now() defer func() { task.SetError(err) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_time").Observe(time.Since(startReplicateTime).Seconds()) }() if task == nil || task.GetObjectInfo() == nil || task.GetStorageParams() == nil { err = ErrDanglingPointer diff --git a/modular/executor/execute_task.go b/modular/executor/execute_task.go index 5249b5fdd..fff504c28 100644 --- a/modular/executor/execute_task.go +++ b/modular/executor/execute_task.go @@ -60,6 +60,9 @@ func (e *ExecuteModular) sealObject(ctx context.Context, task coretask.ObjectTas // even though signer return error, maybe seal on chain successfully because // signer use the async mode, so ignore the error and listen directly err = e.listenSealObject(ctx, task.GetObjectInfo()) + if err == nil { + metrics.PerfUploadTimeHistogram.WithLabelValues("upload_replicate_seal_total_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) + } return err } diff --git a/modular/gater/admin_handler.go b/modular/gater/admin_handler.go index 3957eaedc..b5e9b23d7 100644 --- a/modular/gater/admin_handler.go +++ b/modular/gater/admin_handler.go @@ -176,6 +176,7 @@ func (g *GateModular) getChallengeInfoHandler(w http.ResponseWriter, r *http.Req checksums [][]byte data []byte ) + startTime := time.Now() defer func() { reqCtx.Cancel() if err != nil { @@ -186,6 +187,7 @@ func (g *GateModular) getChallengeInfoHandler(w
http.ResponseWriter, r *http.Req reqCtx.SetHttpCode(http.StatusOK) } log.CtxDebugw(reqCtx.Context(), reqCtx.String()) + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_total_time").Observe(time.Since(startTime).Seconds()) }() reqCtx, err = NewRequestContext(r, g) @@ -199,8 +201,11 @@ func (g *GateModular) getChallengeInfoHandler(w http.ResponseWriter, r *http.Req err = ErrInvalidHeader return } + + getObjectTime := time.Now() objectInfo, err := g.baseApp.Consensus().QueryObjectInfoByID(reqCtx.Context(), reqCtx.request.Header.Get(GnfdObjectIDHeader)) + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_get_object_time").Observe(time.Since(getObjectTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to get object info from consensus", "error", err) if strings.Contains(err.Error(), "No such object") { @@ -211,9 +216,11 @@ func (g *GateModular) getChallengeInfoHandler(w http.ResponseWriter, r *http.Req return } if reqCtx.NeedVerifyAuthorizer() { + authTime := time.Now() authorized, err = g.baseApp.GfSpClient().VerifyAuthorize(reqCtx.Context(), coremodule.AuthOpTypeGetChallengePieceInfo, reqCtx.Account(), objectInfo.GetBucketName(), objectInfo.GetObjectName()) + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_auth_time").Observe(time.Since(authTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) return @@ -225,7 +232,9 @@ func (g *GateModular) getChallengeInfoHandler(w http.ResponseWriter, r *http.Req } } + getBucketTime := time.Now() bucketInfo, err := g.baseApp.Consensus().QueryBucketInfo(reqCtx.Context(), objectInfo.GetBucketName()) + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_get_bucket_time").Observe(time.Since(getBucketTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to get bucket info from consensus", "error", err) err = ErrConsensus @@ -245,8 +254,10 @@ func (g *GateModular) getChallengeInfoHandler(w http.ResponseWriter, r *http.Req err = ErrInvalidHeader return } + getParamTime := time.Now() params, err := g.baseApp.Consensus().QueryStorageParamsByTimestamp( reqCtx.Context(), objectInfo.GetCreateAt()) + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_get_param_time").Observe(time.Since(getParamTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to get storage params", "error", err) return @@ -264,7 +275,9 @@ func (g *GateModular) getChallengeInfoHandler(w http.ResponseWriter, r *http.Req task.InitChallengePieceTask(objectInfo, bucketInfo, params, g.baseApp.TaskPriority(task), reqCtx.Account(), redundancyIdx, segmentIdx, g.baseApp.TaskTimeout(task, pieceSize), g.baseApp.TaskMaxRetry(task)) ctx := log.WithValue(reqCtx.Context(), log.CtxKeyTask, task.Key().String()) + getChallengeInfoTime := time.Now() integrity, checksums, data, err = g.baseApp.GfSpClient().GetChallengeInfo(reqCtx.Context(), task) + metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_get_info_time").Observe(time.Since(getChallengeInfoTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get challenge info", "error", err) return @@ -291,6 +304,7 @@ func (g *GateModular) replicateHandler(w http.ResponseWriter, r *http.Request) { currentHeight uint64 approval = gfsptask.GfSpReplicatePieceApprovalTask{} ) + receivePieceStartTime := time.Now() defer func() { reqCtx.Cancel() if err != nil { @@ -301,6 +315,7 @@ func (g *GateModular) replicateHandler(w http.ResponseWriter, r *http.Request) { 
reqCtx.SetHttpCode(http.StatusOK) } log.CtxDebugw(reqCtx.Context(), reqCtx.String()) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_total_time").Observe(time.Since(receivePieceStartTime).Seconds()) }() // ignore the error, because the replicate request only between SPs, the request // verification is by signature of the ReceivePieceTask @@ -325,13 +340,17 @@ func (g *GateModular) replicateHandler(w http.ResponseWriter, r *http.Request) { err = ErrMismatchSp return } + verifySignatureTime := time.Now() err = p2pnode.VerifySignature(g.baseApp.OperateAddress(), approval.GetSignBytes(), approval.GetApprovedSignature()) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_verify_approval_time").Observe(time.Since(verifySignatureTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to verify replicate piece approval signature") err = ErrSignature return } + getBlockHeightTime := time.Now() currentHeight, err = g.baseApp.Consensus().CurrentHeight(reqCtx.Context()) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_get_block_height_time").Observe(time.Since(getBlockHeightTime).Seconds()) if err != nil { // ignore the system's inner error,let the request go log.CtxErrorw(reqCtx.Context(), "failed to get current block height") @@ -341,7 +360,9 @@ func (g *GateModular) replicateHandler(w http.ResponseWriter, r *http.Request) { return } + decodeTime := time.Now() receiveMsg, err = hex.DecodeString(r.Header.Get(GnfdReceiveMsgHeader)) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_decode_task_time").Observe(time.Since(decodeTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to parse receive header", "receive", r.Header.Get(GnfdReceiveMsgHeader)) @@ -363,20 +384,26 @@ func (g *GateModular) replicateHandler(w http.ResponseWriter, r *http.Request) { err = ErrInvalidHeader return } + readDataTime := time.Now() data, err = io.ReadAll(r.Body) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_read_piece_time").Observe(time.Since(readDataTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to read replicate piece data", "error", err) err = ErrExceptionStream return } if receiveTask.GetPieceIdx() >= 0 { + handlePieceTime := time.Now() err = g.baseApp.GfSpClient().ReplicatePiece(reqCtx.Context(), &receiveTask, data) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_receive_data_time").Observe(time.Since(handlePieceTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to receive piece", "error", err) return } } else { + donePieceTime := time.Now() integrity, signature, err = g.baseApp.GfSpClient().DoneReplicatePiece(reqCtx.Context(), &receiveTask) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_done_time").Observe(time.Since(donePieceTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to done receive piece", "error", err) return diff --git a/modular/gater/object_handler.go b/modular/gater/object_handler.go index c236a15f1..d58afe108 100644 --- a/modular/gater/object_handler.go +++ b/modular/gater/object_handler.go @@ -33,6 +33,7 @@ func (g *GateModular) putObjectHandler(w http.ResponseWriter, r *http.Request) { params *storagetypes.Params ) + uploadPrimaryStartTime := time.Now() defer func() { reqCtx.Cancel() if err != nil { @@ -43,6 +44,7 @@ func (g *GateModular) putObjectHandler(w http.ResponseWriter, r *http.Request) { reqCtx.SetHttpCode(http.StatusOK) } 
log.CtxDebugw(reqCtx.Context(), reqCtx.String()) + metrics.PerfUploadTimeHistogram.WithLabelValues("uploader_primary_total_time").Observe(time.Since(uploadPrimaryStartTime).Seconds()) }() reqCtx, err = NewRequestContext(r, g) @@ -143,6 +145,7 @@ func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) { highOffset int64 pieceInfos []*downloader.SegmentPieceInfo ) + getObjectStartTime := time.Now() defer func() { reqCtx.Cancel() if err != nil { @@ -153,6 +156,7 @@ func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) { reqCtx.SetHttpCode(http.StatusOK) } log.CtxDebugw(reqCtx.Context(), reqCtx.String()) + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_total_time").Observe(time.Since(getObjectStartTime).Seconds()) }() reqCtx, reqCtxErr = NewRequestContext(r, g) // check the object permission whether allow public read. @@ -169,11 +173,14 @@ func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) { return } if reqCtx.NeedVerifyAuthorizer() { + authTime := time.Now() if authorized, err = g.baseApp.GfSpClient().VerifyAuthorize(reqCtx.Context(), coremodule.AuthOpTypeGetObject, reqCtx.Account(), reqCtx.bucketName, reqCtx.objectName); err != nil { + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_auth_time").Observe(time.Since(authTime).Seconds()) log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) return } + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_auth_time").Observe(time.Since(authTime).Seconds()) if !authorized { log.CtxErrorw(reqCtx.Context(), "no permission to operate") err = ErrNoPermission @@ -182,19 +189,27 @@ func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) { } } // else anonymous users can get public object. 
+ getObjectTime := time.Now() objectInfo, err = g.baseApp.Consensus().QueryObjectInfo(reqCtx.Context(), reqCtx.bucketName, reqCtx.objectName) + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_get_object_info_time").Observe(time.Since(getObjectTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to get object info from consensus", "error", err) err = ErrConsensus return } + + getBucketTime := time.Now() bucketInfo, err = g.baseApp.Consensus().QueryBucketInfo(reqCtx.Context(), objectInfo.GetBucketName()) + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_get_bucket_info_time").Observe(time.Since(getBucketTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to get bucket info from consensus", "error", err) err = ErrConsensus return } + + getParamTime := time.Now() params, err = g.baseApp.Consensus().QueryStorageParamsByTimestamp(reqCtx.Context(), objectInfo.GetCreateAt()) + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_get_storage_param_time").Observe(time.Since(getParamTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to get storage params from consensus", "error", err) err = ErrConsensus @@ -232,6 +247,8 @@ func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) { } else { w.Header().Set(ContentLengthHeader, util.Uint64ToString(objectInfo.GetPayloadSize())) } + + getDataTime := time.Now() for idx, pInfo := range pieceInfos { enableCheck := false if idx == 0 { // only check in first piece @@ -241,13 +258,16 @@ func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) { pieceTask.InitDownloadPieceTask(objectInfo, bucketInfo, params, g.baseApp.TaskPriority(task), enableCheck, reqCtx.Account(), uint64(highOffset-lowOffset+1), pInfo.SegmentPieceKey, pInfo.Offset, pInfo.Length, g.baseApp.TaskTimeout(task, uint64(pieceTask.GetSize())), g.baseApp.TaskMaxRetry(task)) + getSegmentTime := time.Now() pieceData, err := g.baseApp.GfSpClient().GetPiece(reqCtx.Context(), pieceTask) + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_segment_data_time").Observe(time.Since(getSegmentTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to download piece", "error", err) return } w.Write(pieceData) } + metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_get_data_time").Observe(time.Since(getDataTime).Seconds()) } // queryUploadProgressHandler handles the query uploaded object progress request. 
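Every instrumentation site added in this patch repeats the same three lines: capture a start time, do the work, Observe the elapsed seconds under a stage label. A small helper of the following kind would collapse each site to a single defer; it is illustrative, not part of the patch, and assumes only the standard prometheus/client_golang HistogramVec API:

// Illustrative timing helper for per-stage HistogramVec instrumentation.
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// perfUploadTime is a hypothetical stand-in for metrics.PerfUploadTimeHistogram.
var perfUploadTime = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "perf_upload_time_seconds",
		Help: "Per-stage upload latency in seconds.",
	},
	[]string{"stage"},
)

func init() { prometheus.MustRegister(perfUploadTime) }

// observeSince records the time elapsed since start under one stage label.
func observeSince(h *prometheus.HistogramVec, stage string, start time.Time) {
	h.WithLabelValues(stage).Observe(time.Since(start).Seconds())
}

func queryObjectInfoStage() {
	// deferred arguments are evaluated here, so start is captured now
	defer observeSince(perfUploadTime, "uploader_get_object_info", time.Now())
	// ... the consensus query being timed ...
}

The explicit style the patch uses keeps each label greppable next to the call it measures; the helper trades that locality for less repetition and no risk of mismatched start variables. Either choice records the same observations.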
diff --git a/modular/manager/manage_task.go b/modular/manager/manage_task.go index 2b4f70fa5..82d3da0f1 100644 --- a/modular/manager/manage_task.go +++ b/modular/manager/manage_task.go @@ -294,7 +294,6 @@ func (m *ManageModular) handleFailedSealObjectTask(ctx context.Context, handleTa log.CtxDebugw(ctx, "push task again to retry", "task_info", handleTask.Info(), "error", err) return nil } else { - metrics.SealObjectFailedCounter.WithLabelValues(m.Name()).Inc() if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ ObjectID: handleTask.GetObjectInfo().Id.Uint64(), TaskState: types.TaskState_TASK_STATE_SEAL_OBJECT_ERROR, @@ -302,7 +301,9 @@ func (m *ManageModular) handleFailedSealObjectTask(ctx context.Context, handleTa }); err != nil { log.CtxErrorw(ctx, "failed to update object task state", "task_info", handleTask.Info(), "error", err) } - log.CtxWarnw(ctx, "delete expired seal object task", "task_info", handleTask.Info()) + err := m.RejectUnSealObject(ctx, handleTask.GetObjectInfo()) + log.CtxWarnw(ctx, "delete expired seal object task and reject unseal object", + "task_info", handleTask.Info(), "reject_unseal_error", err) } return nil } diff --git a/modular/manager/manager.go b/modular/manager/manager.go index 720993b18..3b5a7633a 100644 --- a/modular/manager/manager.go +++ b/modular/manager/manager.go @@ -459,6 +459,7 @@ func (m *ManageModular) syncConsensusInfo(ctx context.Context) { } func (m *ManageModular) RejectUnSealObject(ctx context.Context, object *storagetypes.ObjectInfo) error { + metrics.SealObjectFailedCounter.WithLabelValues(m.Name()).Inc() rejectUnSealObjectMsg := &storagetypes.MsgRejectSealObject{ BucketName: object.GetBucketName(), ObjectName: object.GetObjectName(), diff --git a/modular/receiver/receive_task.go b/modular/receiver/receive_task.go index 9852e48de..7807d28d0 100644 --- a/modular/receiver/receive_task.go +++ b/modular/receiver/receive_task.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "net/http" + "time" "github.com/bnb-chain/greenfield-common/go/hash" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" @@ -12,6 +13,7 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/core/task" "github.com/bnb-chain/greenfield-storage-provider/core/taskqueue" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" + "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" storagetypes "github.com/bnb-chain/greenfield/x/storage/types" ) @@ -40,12 +42,18 @@ func (r *ReceiveModular) HandleReceivePieceTask(ctx context.Context, task task.R err = ErrDanglingTask return ErrDanglingTask } + checkHasTime := time.Now() if r.receiveQueue.Has(task.Key()) { + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_check_has_time").Observe(time.Since(checkHasTime).Seconds()) log.CtxErrorw(ctx, "has repeat receive task") err = ErrRepeatedTask return ErrRepeatedTask } + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_check_has_time").Observe(time.Since(checkHasTime).Seconds()) + + pushTime := time.Now() err = r.receiveQueue.Push(task) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_push_time").Observe(time.Since(pushTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to push receive task to queue", "error", err) return err @@ -64,16 +72,23 @@ func (r *ReceiveModular) HandleReceivePieceTask(ctx context.Context, task task.R pieceKey = r.baseApp.PieceOp().SegmentPieceKey(task.GetObjectInfo().Id.Uint64(), uint32(task.GetPieceIdx())) } + setDBTime 
:= time.Now() if err = r.baseApp.GfSpDB().SetReplicatePieceChecksum(task.GetObjectInfo().Id.Uint64(), task.GetReplicateIdx(), uint32(task.GetPieceIdx()), task.GetPieceChecksum()); err != nil { + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_set_mysql_time").Observe(time.Since(setDBTime).Seconds()) log.CtxErrorw(ctx, "failed to set checksum to db", "error", err) err = ErrGfSpDB return ErrGfSpDB } + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_set_mysql_time").Observe(time.Since(setDBTime).Seconds()) + + setPieceTime := time.Now() if err = r.baseApp.PieceStore().PutPiece(ctx, pieceKey, data); err != nil { + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_set_piece_time").Observe(time.Since(setPieceTime).Seconds()) err = ErrPieceStore return ErrPieceStore } + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_set_piece_time").Observe(time.Since(setPieceTime).Seconds()) log.CtxDebugw(ctx, "succeed to receive piece data") return nil } @@ -92,10 +107,14 @@ func (r *ReceiveModular) HandleDoneReceivePieceTask(ctx context.Context, task ta err = ErrDanglingTask return nil, nil, ErrDanglingTask } + pushTime := time.Now() if err = r.receiveQueue.Push(task); err != nil { + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_done_push_time").Observe(time.Since(pushTime).Seconds()) log.CtxErrorw(ctx, "failed to push receive task", "error", err) return nil, nil, err } + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_done_push_time").Observe(time.Since(pushTime).Seconds()) + defer r.receiveQueue.PopByKey(task.Key()) if task == nil || task.GetObjectInfo() == nil { log.CtxErrorw(ctx, "failed to done receive task, pointer dangling") @@ -104,8 +123,11 @@ func (r *ReceiveModular) HandleDoneReceivePieceTask(ctx context.Context, task ta } segmentCount := r.baseApp.PieceOp().SegmentPieceCount(task.GetObjectInfo().GetPayloadSize(), task.GetStorageParams().VersionedParams.GetMaxSegmentSize()) + + getChecksumsTime := time.Now() checksums, err := r.baseApp.GfSpDB().GetAllReplicatePieceChecksum( task.GetObjectInfo().Id.Uint64(), task.GetReplicateIdx(), segmentCount) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_done_get_checksums_time").Observe(time.Since(getChecksumsTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get checksum from db", "error", err) err = ErrGfSpDB @@ -116,8 +138,10 @@ func (r *ReceiveModular) HandleDoneReceivePieceTask(ctx context.Context, task ta err = ErrUnfinishedTask return nil, nil, ErrUnfinishedTask } + signTime := time.Now() signature, integrity, err := r.baseApp.GfSpClient().SignIntegrityHash(ctx, task.GetObjectInfo().Id.Uint64(), checksums) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_done_sign_time").Observe(time.Since(signTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to sign the integrity hash", "error", err) return nil, nil, err @@ -128,23 +152,32 @@ func (r *ReceiveModular) HandleDoneReceivePieceTask(ctx context.Context, task ta PieceChecksumList: checksums, Signature: signature, } + setIntegrityTime := time.Now() err = r.baseApp.GfSpDB().SetObjectIntegrity(integrityMeta) + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_done_set_integrity_time").Observe(time.Since(setIntegrityTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to write integrity meta to db", "error", err) err = ErrGfSpDB return 
nil, nil, ErrGfSpDB } + deletePieceHashTime := time.Now() if err = r.baseApp.GfSpDB().DeleteAllReplicatePieceChecksum( task.GetObjectInfo().Id.Uint64(), task.GetReplicateIdx(), segmentCount); err != nil { log.CtxErrorw(ctx, "failed to delete all replicate piece checksum", "error", err) // ignore the error,let the request go, the background task will gc the meta again later + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_done_delete_piece_hash_time").Observe(time.Since(deletePieceHashTime).Seconds()) } + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_done_delete_piece_hash_time").Observe(time.Since(deletePieceHashTime).Seconds()) // the manager dispatch the task to confirm whether seal on chain as secondary sp. task.SetError(nil) + + reportTime := time.Now() if err = r.baseApp.GfSpClient().ReportTask(ctx, task); err != nil { log.CtxErrorw(ctx, "failed to report receive task for confirming seal", "error", err) // ignore the error,let the request go, the background task will gc the unsealed data later + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_done_report_time").Observe(time.Since(reportTime).Seconds()) } + metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_server_done_report_time").Observe(time.Since(reportTime).Seconds()) log.CtxDebugw(ctx, "succeed to done receive piece") return integrity, signature, nil } diff --git a/pkg/metrics/metric_items.go b/pkg/metrics/metric_items.go index 917837692..e9ab342e3 100644 --- a/pkg/metrics/metric_items.go +++ b/pkg/metrics/metric_items.go @@ -16,6 +16,10 @@ var MetricsItems = []prometheus.Collector{ // Perf workflow category PerfUploadTimeHistogram, PerfGetApprovalTimeHistogram, + PerfAuthTimeHistogram, + PerfReceivePieceTimeHistogram, + PerfGetObjectTimeHistogram, + PerfChallengeTimeHistogram, // TaskQueue metrics category QueueSizeGauge, QueueCapGauge, @@ -106,6 +110,26 @@ var ( Help: "Track get approval workflow costs.", Buckets: prometheus.DefBuckets, }, []string{"perf_get_approval_time"}) + PerfAuthTimeHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "perf_auth_time", + Help: "Track auth workflow costs.", + Buckets: prometheus.DefBuckets, + }, []string{"perf_auth_time"}) + PerfReceivePieceTimeHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "perf_receive_time", + Help: "Track receive piece workflow costs.", + Buckets: prometheus.DefBuckets, + }, []string{"perf_receive_time"}) + PerfGetObjectTimeHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "perf_get_object_time", + Help: "Track get object workflow costs.", + Buckets: prometheus.DefBuckets, + }, []string{"perf_get_object_time"}) + PerfChallengeTimeHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "perf_challenge_piece_time", + Help: "Track challenge piece workflow costs.", + Buckets: prometheus.DefBuckets, + }, []string{"perf_challenge_piece_time"}) // task queue metrics QueueSizeGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ diff --git a/store/piecestore/storage/ldfs.go b/store/piecestore/storage/ldfs.go index 8580d8185..cce2a7679 100644 --- a/store/piecestore/storage/ldfs.go +++ b/store/piecestore/storage/ldfs.go @@ -57,7 +57,7 @@ func (sc *SessionCache) newLdfsSession(cfg ObjectStorageConfig) (*session.Sessio S3ForcePathStyle: aws.Bool(true), HTTPClient: getHTTPClient(cfg.TLSInsecureSkipVerify), } - // We don't need additional authentication. 
+ // We don't need additional authentication. // Because we use a whitelist to restrict the IPs that can access LDFS. awsConfig.Credentials = credentials.NewStaticCredentials("ldfs", "ldfs", "") From d687eee07017069fde1ae05ddc653276bfe60bee Mon Sep 17 00:00:00 2001 From: joeycli Date: Tue, 13 Jun 2023 21:25:54 +0800 Subject: [PATCH 08/78] fix: metrics collect bug (#593) --- modular/executor/execute_task.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modular/executor/execute_task.go b/modular/executor/execute_task.go index fff504c28..2e5979748 100644 --- a/modular/executor/execute_task.go +++ b/modular/executor/execute_task.go @@ -60,7 +60,7 @@ func (e *ExecuteModular) sealObject(ctx context.Context, task coretask.ObjectTas // even though signer return error, maybe seal on chain successfully because // signer use the async mode, so ignore the error and listen directly err = e.listenSealObject(ctx, task.GetObjectInfo()) - if err != nil { + if err == nil { metrics.PerfUploadTimeHistogram.WithLabelValues("upload_replicate_seal_total_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) } return err From 743e5fafee8b21155a44e2ce08ffa3189e802546 Mon Sep 17 00:00:00 2001 From: VM <112189277+sysvm@users.noreply.github.com> Date: Wed, 14 Jun 2023 16:54:03 +0800 Subject: [PATCH 09/78] test: use gnfd-cmd to do e2e test (#585) * test: use gnfd-cmd to do e2e test * test: complete e2e test shell script * test: add e2e test shell script * test: remove github access token env var in all actions --------- Co-authored-by: VM --- .github/workflows/build-test.yml | 8 +- .github/workflows/code-lint.yml | 6 - .github/workflows/commit-lint.yml | 4 +- .github/workflows/docker-release.yml | 1 - .github/workflows/e2e-test.yml | 63 ++---- .github/workflows/gosec.yml | 7 +- .github/workflows/release.yml | 6 - .github/workflows/unit-test.yml | 6 - build.sh | 1 + deployment/localup/localup.sh | 30 +-- .../localup_env/integration_config/config.yml | 42 ---- test/e2e/spworkflow/e2e_test.sh | 182 ++++++++++++++++++ test/e2e/spworkflow/testdata/example.json | 33 ++++ 13 files changed, 246 insertions(+), 143 deletions(-) delete mode 100644 test/e2e/localup_env/integration_config/config.yml create mode 100644 test/e2e/spworkflow/e2e_test.sh create mode 100644 test/e2e/spworkflow/testdata/example.json diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 6ce5ecf49..f7e976627 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -22,17 +22,11 @@ jobs: go-version: [1.20.x] os: [ ubuntu-latest ] runs-on: ${{ matrix.os }} - env: - GOPRIVATE: github.com/bnb-chain - GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }} steps: - name: Install Go uses: actions/setup-go@v3 with: go-version: ${{ matrix.go-version }} - - name: Setup GitHub Token - run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/ - - name: Checkout code uses: actions/checkout@v3 @@ -57,7 +51,7 @@ jobs: version: 1.14.0 buf_user: "${{ secrets.BUF_REGISTRY_USER }}" buf_api_token: "${{ secrets.BUF_REGISTRY_SECRET }}" - + - name: Install Protoc uses: arduino/setup-protoc@v1 diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml index 819892e36..8b0e9e9ad 100644 --- a/.github/workflows/code-lint.yml +++ b/.github/workflows/code-lint.yml @@ -21,9 +21,6 @@ jobs: golangci: name: golangci-lint runs-on: ${{ matrix.os }} - env: - GOPRIVATE: github.com/bnb-chain - GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }} 
timeout-minutes: 8 strategy: matrix: @@ -31,9 +28,6 @@ jobs: os: [ ubuntu-latest ] steps: - uses: actions/checkout@v3 - - name: Setup GitHub Token - run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/ - - uses: actions/setup-go@v3 with: go-version: ${{ matrix.go-version }} diff --git a/.github/workflows/commit-lint.yml b/.github/workflows/commit-lint.yml index 445fd9d27..16533cdba 100644 --- a/.github/workflows/commit-lint.yml +++ b/.github/workflows/commit-lint.yml @@ -8,7 +8,6 @@ on: - release* - fix-release* - pull_request: branches: - master @@ -16,7 +15,6 @@ on: - release* - fix-release* - jobs: commit-message-lint: strategy: @@ -66,4 +64,4 @@ jobs: allowed_prefixes: 'build,chore,ci,docs,feat,fix,perf,refactor,revert,style,test' ignore: master,develop,release min_length: 5 - max_length: 30 \ No newline at end of file + max_length: 30 diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml index 489d1379e..58018c082 100644 --- a/.github/workflows/docker-release.yml +++ b/.github/workflows/docker-release.yml @@ -62,4 +62,3 @@ jobs: docker tag $IMAGE_NAME $IMAGE_NAME:latest docker push $IMAGE_NAME:$VERSION docker push $IMAGE_NAME:latest - \ No newline at end of file diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml index fe9b315be..b3d849d68 100644 --- a/.github/workflows/e2e-test.yml +++ b/.github/workflows/e2e-test.yml @@ -15,17 +15,6 @@ on: - release* - fix-release* -env: - GreenfieldTag: v0.2.1-alpha.1 - GreenfieldIntegrationTestBranch: sp-refactor-0.2.1-new-sdk - GOPRIVATE: github.com/bnb-chain - GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }} - MYSQL_USER: root - MYSQL_PASSWORD: root - MYSQL_ROOT_PASSWORD: root - MYSQL_IP: 127.0.0.1 - MYSQL_PORT: 3306 - jobs: e2e-test: strategy: @@ -39,54 +28,26 @@ jobs: uses: actions/setup-go@v3 with: go-version: ${{ matrix.go-version }} - - name: Setup GitHub Token - run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/ # Checkout Greenfield-SP - name: Checkout Greenfield-SP uses: actions/checkout@v3 - # Build Greenfield - - name: Build Greenfield - run: | - git clone https://github.com/bnb-chain/greenfield.git - cd greenfield - git checkout ${{ env.GreenfieldTag }} - make proto-gen & make build - cd .. - # Build Greenfield-SP - - name: Build Greenfield-SP - run: | - make install-tools - make build # Run MySQL - name: Run MySQL run: | sudo /etc/init.d/mysql start - # Run Greenfield - - name: Run Greenfield + # Build and Start Greenfield Blockchain + - name: Build and Start Greenfield Blockchain + run: | + bash ./test/e2e/spworkflow/e2e_test.sh --startChain +# # Build and Start Greenfield SP + - name: Build and Start Greenfield SP run: | - cd greenfield - bash ./deployment/localup/localup.sh all 1 7 - bash ./deployment/localup/localup.sh export_sps 1 7 > sp.json - cd .. 
- # Run Greenfield-SPs - - name: Run Greenfield-SPs + bash ./test/e2e/spworkflow/e2e_test.sh --startSP + # Build Greenfield Cmd + - name: Build Greenfield Cmd run: | - bash ./deployment/localup/localup.sh --generate $GITHUB_WORKSPACE/greenfield/sp.json ${{ env.MYSQL_USER }} ${{ env.MYSQL_PASSWORD }} ${{ env.MYSQL_IP }}:${{ env.MYSQL_PORT }} - bash ./deployment/localup/localup.sh --reset - bash ./deployment/localup/localup.sh --start - sleep 10 - tail -n 1000 deployment/localup/local_env/sp0/gnfd-sp.log - ps -ef | grep gnfd-sp | wc -l - # Run E2E Test + bash ./test/e2e/spworkflow/e2e_test.sh --buildCmd +# # Use Greenfield Cmd Running E2E Test - name: Run Greenfield E2E Test run: | - git clone https://github.com/bnb-chain/greenfield-integration-test.git - cd greenfield-integration-test - git checkout ${{ env.GreenfieldIntegrationTestBranch }} - cd .. - bash ./deployment/localup/localup.sh --gene2e $GITHUB_WORKSPACE/greenfield/sp.json - cp -rf test/e2e/localup_env/integration_config/config.yml greenfield-integration-test/config/config.yml - cd greenfield-integration-test - cat config/config.yml - go test -v tests/storage/new_sdk_test.go - cd .. \ No newline at end of file + bash ./test/e2e/spworkflow/e2e_test.sh --runTest diff --git a/.github/workflows/gosec.yml b/.github/workflows/gosec.yml index 7bbe1b9f2..9b70501c1 100644 --- a/.github/workflows/gosec.yml +++ b/.github/workflows/gosec.yml @@ -23,16 +23,11 @@ jobs: go-version: [1.20.x] os: [ubuntu-latest] runs-on: ${{ matrix.os }} - env: - GOPRIVATE: github.com/bnb-chain - GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }} steps: - uses: actions/setup-go@v3 with: go-version: ${{ matrix.go-version }} - uses: actions/checkout@v3 - - name: Setup GitHub Token - run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/ - uses: actions/cache@v3 with: # In order: @@ -56,4 +51,4 @@ jobs: - name: Run Gosec Security Scanner uses: securego/gosec@master with: - args: -quiet -confidence high -severity high ./... \ No newline at end of file + args: -quiet -confidence high -severity high ./... 
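For reference, the round-trip checks in the e2e script added later in this patch (test/e2e/spworkflow/e2e_test.sh) compare md5sums of the uploaded and downloaded files. A standalone Go equivalent of that check is sketched below; reading the two paths from os.Args is illustrative.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// fileMD5 streams a file through an MD5 hash and returns the hex digest,
// matching what `md5sum <file> | cut -d ' ' -f 1` prints.
func fileMD5(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	a, err := fileMD5(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	b, err := fileMD5(os.Args[2])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if a != b {
		fmt.Println("The md5 values are different.")
		os.Exit(1)
	}
	fmt.Println("The md5 values are the same.")
}
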
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b2ac6765c..8bf39b2a5 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,8 +14,6 @@ jobs: go-version: [1.20.x] os: [ ubuntu-latest ] runs-on: ${{ matrix.os }} - env: - GOPRIVATE: github.com/bnb-chain steps: - name: Checkout Code uses: actions/checkout@v3 @@ -40,8 +38,6 @@ jobs: key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} restore-keys: | ${{ runner.os }}-go- - - name: Setup GitHub Token - run: git config --global url.https://${{ secrets.GH_ACCESS_TOKEN }}@github.com/.insteadOf https://github.com/ - name: Checkout code uses: actions/checkout@v3 @@ -131,8 +127,6 @@ jobs: echo "EOF" >> $GITHUB_ENV - name: Create Release uses: softprops/action-gh-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }} # This token is provided by Actions, you do not need to create your own token with: tag_name: ${{ env.RELEASE_VERSION}} release_name: ${{ env.RELEASE_VERSION}} diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index cfb32256d..8869bf34f 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -26,9 +26,6 @@ jobs: go-version: [1.20.x] os: [ubuntu-latest] runs-on: ${{ matrix.os }} - env: - GOPRIVATE: github.com/bnb-chain - GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }} steps: - name: Install Go uses: actions/setup-go@v3 @@ -67,9 +64,6 @@ jobs: make install-tools make buf-gen - - name: Setup GitHub Token - run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/ - - name: Unit Test run: | make test diff --git a/build.sh b/build.sh index 3f8233534..4a185676f 100644 --- a/build.sh +++ b/build.sh @@ -20,6 +20,7 @@ go build -ldflags "\ if [ $? -ne 0 ]; then echo "build failed Ooooooh!!!" + exit 1 else echo "build succeed!" fi diff --git a/deployment/localup/localup.sh b/deployment/localup/localup.sh index 4943b18ba..757c10026 100644 --- a/deployment/localup/localup.sh +++ b/deployment/localup/localup.sh @@ -11,7 +11,7 @@ gnfd_workspace=${workspace}/../../greenfield/deployment/localup/ ######################### # the command line help # ######################### -display_help() { +function display_help() { echo "Usage: $0 [option...] 
{help|generate|reset|start|stop|print}" >&2 echo echo " --help display help info" @@ -43,7 +43,7 @@ function generate_sp_db_info() { for ((i=0;i<${SP_NUM};i++));do mkdir -p ${workspace}/${SP_DEPLOY_DIR}/sp${i} cp -rf ${sp_bin} ${workspace}/${SP_DEPLOY_DIR}/sp${i}/${sp_bin_name}${i} - cd ${workspace}/${SP_DEPLOY_DIR}/sp${i}/ + cd ${workspace}/${SP_DEPLOY_DIR}/sp${i}/ || exit 1 ./${sp_bin_name}${i} config.dump # generate sp info @@ -83,11 +83,11 @@ function generate_sp_db_info() { ############################################################# # make sp config.toml according to env.info/db.info/sp.info # ############################################################# -make_config() { +function make_config() { index=0 for sp_dir in ${workspace}/${SP_DEPLOY_DIR}/* ; do cur_port=$((SP_START_PORT+1000*$index)) - cd ${sp_dir} + cd ${sp_dir} || exit 1 source db.info source sp.info # app @@ -154,7 +154,7 @@ make_config() { ############################################################# # make integration test config.toml according sp.json # ############################################################# -make_integration_test_config() { +function make_integration_test_config() { index=0 sp_json_file=$1 file='test/e2e/localup_env/integration_config/config.yml' @@ -203,10 +203,10 @@ make_integration_test_config() { ############# # start sps # ############# -start_sp() { +function start_sp() { index=0 for sp_dir in ${workspace}/${SP_DEPLOY_DIR}/* ; do - cd ${sp_dir} + cd ${sp_dir} || exit 1 nohup ./${sp_bin_name}${index} --config config.toml log.txt 2>&1& echo "succeed to start sp in "${sp_dir} cd - >/dev/null @@ -218,7 +218,7 @@ start_sp() { ############ # stop sps # ############ -stop_sp() { +function stop_sp() { kill -9 $(pgrep -f ${sp_bin_name}) >/dev/null 2>&1 echo "succeed to stop storage providers" } @@ -226,9 +226,9 @@ stop_sp() { ############################################# # drop databases and recreate new databases # ############################################# -reset_sql_db() { +function reset_sql_db() { for sp_dir in ${workspace}/${SP_DEPLOY_DIR}/* ; do - cd ${sp_dir} + cd ${sp_dir} || exit 1 source db.info hostname=$(echo ${ADDRESS} | cut -d : -f 1) port=$(echo ${ADDRESS} | cut -d : -f 2) @@ -242,9 +242,9 @@ reset_sql_db() { ########################## # clean piece-store data # ########################## -reset_piece_store() { +function reset_piece_store() { for sp_dir in ${workspace}/${SP_DEPLOY_DIR}/* ; do - cd ${sp_dir} + cd ${sp_dir} || exit 1 rm -rf ./data echo "succeed to reset piece store in "${sp_dir} cd - >/dev/null @@ -254,7 +254,7 @@ reset_piece_store() { ################## # print work dir # ################## -print_work_dir() { +function print_work_dir() { for sp_dir in ${workspace}/${SP_DEPLOY_DIR}/* ; do echo " "${sp_dir} done @@ -282,7 +282,7 @@ function clean_local_sp_env() { ############# # reset sps # ############# -reset_sp() { +function reset_sp() { stop_sp reset_sql_db reset_piece_store @@ -290,7 +290,7 @@ reset_sp() { make_config } -main() { +function main() { CMD=$1 case ${CMD} in --generate) diff --git a/test/e2e/localup_env/integration_config/config.yml b/test/e2e/localup_env/integration_config/config.yml deleted file mode 100644 index 632dc3ea6..000000000 --- a/test/e2e/localup_env/integration_config/config.yml +++ /dev/null @@ -1,42 +0,0 @@ -BSCEndpoint: "https://gnfd-bsc.qa.bnbchain.world" -BSCWSEndpoint: "ws://172.22.42.205:8545" -Account: - - ff0d780dd5a58f0a0ac8f3839d592711e3ae7fcf70dc695922b133ce31d8bcae -BSCChainid: 714 -BSCOwnerHex: 
ff0d780dd5a58f0a0ac8f3839d592711e3ae7fcf70dc695922b133ce31d8bcae -BSCTokenHub: 0x35dd738E306d32f2709824B6f744f188DA01D3C5 -BSCNormalAcc: - - a180523a5ac6cac101155057133c88353f098a05b1bed6f1076f3bc677ed8cd1 - - 414c619be8210cb18bb209cda801ba56fde57023a7783c9a58a72b7a75d9cc21 - - 8db9511454548dc4abecfd9b53f96405ca2330b7bfef7e6a14771167f130af88 - - c2a87e926baf966e77ca77cedcf38940e73326010c74a165f471c620ab8471ba - - 19e7ab6ecc8d5a9e2ce826b136aed6af749ad7f717074e17f3fb7629b7138a7e - - b993bd7e37eeb878c13f9c2666a26980a95b8c72fa64d004e943c5fb7170af5f - - e510e8ccaae3bafdbac53e892a211ee84ee2d8d651c787cbc5630c378b9cc939 - - f85ffe744eb4eb62db117f5b06d6b3846c22fb92baba7d9488e7ebbb5f55299d - - 787aff84b92b3e69d4656b9ff90af1fdb922e0f34f5f72ead4674fcd3464fd7a - - b3c3bfd7e98e4e6a336629499d10e6eaab408c475d26c8d8fc71fa532c339125 - - a1f49f935ada18aca859fb2fe4731a6ad0b9fd925b2fdbf085145b8d41fbd948 - -GreenfieldEndpoint: "http://localhost:26750" -GreenfieldGRPC: "localhost:9090" -GreenfieldChainId: "greenfield_9000-1741" -GreenfieldRoot: "20f92afe113b90e1faa241969e957ac091d80b920f84ffda80fc9d0588f62906" -GreenfieldValSecret: - - "20f92afe113b90e1faa241969e957ac091d80b920f84ffda80fc9d0588f62906" - -GreenfieldAccSecret: - - 9103dfb7750e3c58513184a37ce01cd87d5486671384e24417b6de3d7ea4cb2e - - db3414b2ff0ec038045141f3f2ab09cb29949997eafeb05e26d266618c48ce52 - - 1b3986151ea0d8ab2e3479f68c8eb548b5404af9e3febe745890091100c4bf7a - - b46f36707d37f5bca196ccd765e949cf281934a902ab370eb45d262fd2113bae - - 27a135c96319e895f290e5209892452c354b4e6f4238284e62892ff978f3741d - - 3d383f2607b1bed8a652899119879538a1e3a24b83688fe24475a88d93ab2752 - - ec7ad8d37887bfb514f51fa58a92c91b1c8e283693e0f10f24b9113b6cd36812 - - 3fceafabcdf62a81b47e2ad97ea3dbd76b12e7caad2a97e1fab16cdf5496bad8 - - 9f15e076a11fc0fc49e64626056f7be65b6a0d88f5a73499e090b4ea60c18b54 - - 5d8b771cd8d46cf802adfd3b2a28b71c5e9fb108fad76dccee8836f0d8d102da -GreenfieldNodeIP: - - "" -SPEndpoint: - - "127.0.0.1:9033" diff --git a/test/e2e/spworkflow/e2e_test.sh b/test/e2e/spworkflow/e2e_test.sh new file mode 100644 index 000000000..de182c0e7 --- /dev/null +++ b/test/e2e/spworkflow/e2e_test.sh @@ -0,0 +1,182 @@ +#!/usr/bin/env bash + +#basedir=$(cd `dirname $0` || return; pwd) +workspace=${GITHUB_WORKSPACE} + +# some constants +GREENFIELD_REPO_TAG="v0.2.2-alpha.2" +GREENFIELD_CMD_BRANCH="dev-new-version" +MYSQL_USER="root" +MYSQL_PASSWORD="root" +MYSQL_ADDRESS="127.0.0.1:3306" +TEST_ACCOUNT_ADDRESS="0x76263999b87D08228eFB098F36d17363Acf40c2c" +TEST_ACCOUNT_PRIVATE_KEY="da942d31bc4034577f581057e4a3644404ac12828a84052f87086d508fdcf095" +BUCKET_NAME="spe2etestbucket" + +######################################### +# build and start Greenfield blockchain # +######################################### +function greenfield_chain() { + set -e + # build Greenfield chain + echo ${workspace} + cd ${workspace} + git clone https://github.com/bnb-chain/greenfield.git + cd greenfield/ + git checkout ${GREENFIELD_REPO_TAG} + make proto-gen & make build + + # start Greenfield chain + bash ./deployment/localup/localup.sh all 1 7 + bash ./deployment/localup/localup.sh export_sps 1 7 > sp.json + + # transfer some BNB tokens + transfer_account +} + +############################################# +# transfer some BNB tokens to test accounts # +############################################# +function transfer_account() { + set -e + cd ${workspace}/greenfield/ + ./build/bin/gnfd tx bank send validator0 ${TEST_ACCOUNT_ADDRESS} 500000000000000000000BNB --home 
${workspace}/greenfield/deployment/localup/.local/validator0 --keyring-backend test --node http://localhost:26750 -y + sleep 2 + ./build/bin/gnfd q bank balances ${TEST_ACCOUNT_ADDRESS} --node http://localhost:26750 +} + +################################# +# build and start Greenfield SP # +################################# +function greenfield_sp() { + set -e + cd ${workspace} + make install-tools + make build + bash ./deployment/localup/localup.sh --generate ${workspace}/greenfield/sp.json ${MYSQL_USER} ${MYSQL_PASSWORD} ${MYSQL_ADDRESS} + bash ./deployment/localup/localup.sh --reset + bash ./deployment/localup/localup.sh --start + sleep 5 + tail -n 1000 deployment/localup/local_env/sp0/gnfd-sp.log + ps -ef | grep gnfd-sp | wc -l +} + +############################################ +# build Greenfield cmd and set cmd config # +############################################ +function build_cmd() { + set -e + cd ${workspace} + # build sp + git clone https://github.com/bnb-chain/greenfield-cmd.git + cd greenfield-cmd/ + git checkout ${GREENFIELD_CMD_BRANCH} + make build + cd build/ + + # generate a keystore file to manage private key information + touch key.txt & echo ${TEST_ACCOUNT_PRIVATE_KEY} > key.txt + touch password.txt & echo "test_sp_function" > password.txt + ./gnfd-cmd --home ./ keystore generate --privKeyFile key.txt --passwordfile password.txt + + # construct config.toml + touch config.toml + { + echo rpcAddr = \"http://localhost:26750\" + echo chainId = \"greenfield_9000-121\" + } > config.toml +} + +###################### +# test create bucket # +###################### +function test_create_bucket() { + set -e + cd ${workspace}/greenfield-cmd/build/ + ./gnfd-cmd -c ./config.toml --home ./ sp ls + sleep 5 + ./gnfd-cmd -c ./config.toml --home ./ bucket create gnfd://${BUCKET_NAME} + ./gnfd-cmd -c ./config.toml --home ./ bucket head gnfd://${BUCKET_NAME} + sleep 10 +} + +########################################################### +# test upload and download file which size less than 16MB # +########################################################### +function test_file_size_less_than_16_mb() { + set -e + cd ${workspace}/greenfield-cmd/build/ + ./gnfd-cmd -c ./config.toml --home ./ object put --contentType "application/json" ${workspace}/test/e2e/spworkflow/testdata/example.json gnfd://${BUCKET_NAME} + sleep 16 + ./gnfd-cmd -c ./config.toml --home ./ object get gnfd://spe2etestbucket/example.json ./test_data.json + check_md5 ${workspace}/test/e2e/spworkflow/testdata/example.json ./test_data.json + cat test_data.json +} + +############################################################## +# test upload and download file which size greater than 16MB # +############################################################## +function test_file_size_greater_than_16_mb() { + set -e + cd ${workspace}/greenfield-cmd/build/ + dd if=/dev/urandom of=./random_file bs=17M count=1 + ./gnfd-cmd -c ./config.toml --home ./ object put --contentType "application/octet-stream" ./random_file gnfd://${BUCKET_NAME}/random_file + sleep 16 + ./gnfd-cmd -c ./config.toml --home ./ object get gnfd://spe2etestbucket/random_file ./new_random_file + sleep 10 + check_md5 ./random_file ./new_random_file +} + +################################## +# check two md5 whether is equal # +################################## +function check_md5() { + set -e + if [ $# != 2 ]; then + echo "failed to check md5 value; this function needs two args" + exit 1 + fi + file1=$1 + file2=$2 + md5_1=$(md5sum ${file1} | cut -d ' ' -f 1) + 
md5_2=$(md5sum ${file2} | cut -d ' ' -f 1) + echo ${md5_1} + echo ${md5_2} + + if [ "$md5_1" = "$md5_2" ]; then + echo "The md5 values are the same." + else + echo "The md5 values are different." + exit 1 + fi +} + +####################### +# run sp workflow e2e # +####################### +function run_e2e() { + set -e + test_create_bucket + test_file_size_less_than_16_mb + test_file_size_greater_than_16_mb +} + +function main() { + CMD=$1 + case ${CMD} in + --startChain) + greenfield_chain + ;; + --startSP) + greenfield_sp + ;; + --buildCmd) + build_cmd + ;; + --runTest) + run_e2e + ;; + esac +} + +main $@ diff --git a/test/e2e/spworkflow/testdata/example.json b/test/e2e/spworkflow/testdata/example.json new file mode 100644 index 000000000..28e8a8af7 --- /dev/null +++ b/test/e2e/spworkflow/testdata/example.json @@ -0,0 +1,33 @@ +{ + "glossary": { + "title": "example glossary", + "GlossDiv": { + "title": "S", + "GlossList": { + "GlossEntry": { + "ID": "SGML", + "SortAs": "SGML", + "GlossTerm": "Standard Generalized Markup Language", + "Acronym": "SGML", + "Abbrev": "ISO 8879:1986", + "GlossDef": { + "para": "A meta-markup language, used to create markup languages such as DocBook.", + "GlossSeeAlso": ["GML", "XML"] + }, + "GlossSee": "markup" + } + } + } + }, + "menu": { + "id": "file", + "value": "File", + "popup": { + "menuitem": [ + {"value": "New", "onclick": "CreateNewDoc()"}, + {"value": "Open", "onclick": "OpenDoc()"}, + {"value": "Close", "onclick": "CloseDoc()"} + ] + } + } +} From 7f39714921a3c853b710b6c4b8b768bcaeb8116b Mon Sep 17 00:00:00 2001 From: joeycli Date: Thu, 15 Jun 2023 10:45:52 +0800 Subject: [PATCH 10/78] feat: add replicate metrics (#599) (#602) --- modular/executor/execute_replicate.go | 42 +++++++++++++++++++++++++++ modular/executor/executor.go | 1 + 2 files changed, 43 insertions(+) diff --git a/modular/executor/execute_replicate.go b/modular/executor/execute_replicate.go index bd83e9ce4..65f3bc20d 100644 --- a/modular/executor/execute_replicate.go +++ b/modular/executor/execute_replicate.go @@ -42,13 +42,19 @@ func (e *ExecuteModular) HandleReplicatePieceTask(ctx context.Context, task core rAppTask := &gfsptask.GfSpReplicatePieceApprovalTask{} rAppTask.InitApprovalReplicatePieceTask(task.GetObjectInfo(), task.GetStorageParams(), e.baseApp.TaskPriority(rAppTask), e.baseApp.OperateAddress()) + askReplicateApprovalTime := time.Now() approvals, err = e.AskReplicatePieceApproval(ctx, rAppTask, int(low), int(high), e.askReplicateApprovalTimeout) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_ask_p2p_approval_time").Observe(time.Since(askReplicateApprovalTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_p2p_end_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed get approvals", "error", err) return } + replicatePieceTotalTime := time.Now() err = e.handleReplicatePiece(ctx, task, approvals) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_object_time").Observe(time.Since(replicatePieceTotalTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_replicate_object_end_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to replicate piece", "error", err) return @@ -62,7 +68,10 @@ func (e *ExecuteModular) HandleReplicatePieceTask(ctx context.Context, task core SecondarySpAddresses: task.GetSecondaryAddresses(), SecondarySpSignatures: 
task.GetSecondarySignatures(), } + sealTime := time.Now() sealErr := e.sealObject(ctx, task, sealMsg) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_seal_object_time").Observe(time.Since(sealTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_seal_object_end_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) if sealErr == nil { task.SetSealed(true) } @@ -77,7 +86,10 @@ func (e *ExecuteModular) AskReplicatePieceApproval(ctx context.Context, task cor approvals []*gfsptask.GfSpReplicatePieceApprovalTask spInfo *sptypes.StorageProvider ) + p2pTime := time.Now() approvals, err = e.baseApp.GfSpClient().AskSecondaryReplicatePieceApproval(ctx, task, low, high, timeout) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_p2p_protocol_time").Observe(time.Since(p2pTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_p2p_protocol_end_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) if err != nil { return nil, err } @@ -85,6 +97,7 @@ func (e *ExecuteModular) AskReplicatePieceApproval(ctx context.Context, task cor log.CtxErrorw(ctx, "failed to get sufficient sp approval from p2p protocol") return nil, ErrInsufficientApproval } + spDBTime := time.Now() for _, approval := range approvals { spInfo, err = e.baseApp.GfSpDB().GetSpByAddress( approval.GetApprovedSpOperatorAddress(), @@ -96,6 +109,8 @@ func (e *ExecuteModular) AskReplicatePieceApproval(ctx context.Context, task cor approval.SetApprovedSpEndpoint(spInfo.GetEndpoint()) approval.SetApprovedSpApprovalAddress(spInfo.GetApprovalAddress()) } + metrics.PerfUploadTimeHistogram.WithLabelValues("background_sp_db_time").Observe(time.Since(spDBTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_sp_db_end_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) if len(approvals) < low { log.CtxErrorw(ctx, "failed to get sufficient sp info from db") return nil, ErrGfSpDB @@ -180,18 +195,25 @@ func (e *ExecuteModular) handleReplicatePiece(ctx context.Context, rTask coretas log.CtxDebugw(ctx, "success to replicate all pieces") return nil } + pieceTime := time.Now() for pIdx := uint32(0); pIdx < segCount; pIdx++ { pieceKey = e.baseApp.PieceOp().SegmentPieceKey(rTask.GetObjectInfo().Id.Uint64(), pIdx) + pieceTime := time.Now() segData, err := e.baseApp.PieceStore().GetPiece(ctx, pieceKey, 0, -1) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_get_piece_time").Observe(time.Since(pieceTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_get_piece_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to get segment data form piece store", "error", err) rTask.SetError(err) return err } if rTask.GetObjectInfo().GetRedundancyType() == storagetypes.REDUNDANCY_EC_TYPE { + ecTime := time.Now() ecData, err := redundancy.EncodeRawSegment(segData, int(rTask.GetStorageParams().VersionedParams.GetRedundantDataChunkNum()), int(rTask.GetStorageParams().VersionedParams.GetRedundantParityChunkNum())) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_ec_time").Observe(time.Since(ecTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_ec_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to ec encode data", "error", err) rTask.SetError(err) @@ -202,7 +224,12 @@ func (e *ExecuteModular) 
handleReplicatePiece(ctx context.Context, rTask coretas doReplicateSegmentPiece(pIdx, segData) } } + metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_all_piece_time").Observe(time.Since(pieceTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_all_piece_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) + doneTime := time.Now() doneReplicate() + metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_replicate_time").Observe(time.Since(doneTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_replicate_piece_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) } } @@ -220,15 +247,21 @@ func (e *ExecuteModular) doReplicatePiece(ctx context.Context, waitGroup *sync.W e.baseApp.TaskPriority(rTask), replicateIdx, int32(pieceIdx), int64(len(data))) receive.SetPieceChecksum(hash.GenerateChecksum(data)) ctx = log.WithValue(ctx, log.CtxKeyTask, receive.Key().String()) + signTime := time.Now() signature, err = e.baseApp.GfSpClient().SignReceiveTask(ctx, receive) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_sign_receive_time").Observe(time.Since(signTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_sign_receive_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to sign receive task", "replicate_idx", replicateIdx, "piece_idx", pieceIdx, "error", err) return } receive.SetSignature(signature) + replicateOnePieceTime := time.Now() err = e.baseApp.GfSpClient().ReplicatePieceToSecondary(ctx, approval.GetApprovedSpEndpoint(), approval, receive, data) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_one_piece_time").Observe(time.Since(replicateOnePieceTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_one_piece_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to replicate piece", "replicate_idx", replicateIdx, "piece_idx", pieceIdx, "error", err) @@ -250,15 +283,21 @@ func (e *ExecuteModular) doneReplicatePiece(ctx context.Context, rTask coretask. 
receive := &gfsptask.GfSpReceivePieceTask{} receive.InitReceivePieceTask(rTask.GetObjectInfo(), rTask.GetStorageParams(), e.baseApp.TaskPriority(rTask), replicateIdx, -1, 0) + signTime := time.Now() taskSignature, err = e.baseApp.GfSpClient().SignReceiveTask(ctx, receive) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_sign_receive_time").Observe(time.Since(signTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_sign_receive_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to sign done receive task", "replicate_idx", replicateIdx, "error", err) return nil, nil, err } receive.SetSignature(taskSignature) + doneReplicateTime := time.Now() integrity, signature, err = e.baseApp.GfSpClient().DoneReplicatePieceToSecondary(ctx, approval.GetApprovedSpEndpoint(), approval, receive) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_receive_http_time").Observe(time.Since(doneReplicateTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_receive_http_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to done replicate piece", "endpoint", approval.GetApprovedSpEndpoint(), @@ -271,10 +310,13 @@ func (e *ExecuteModular) doneReplicatePiece(ctx context.Context, rTask coretask. "secondary_sp_len", len(rTask.GetObjectInfo().GetSecondarySpAddresses())) return nil, nil, ErrReplicateIdsOutOfBounds } + veritySignatureTime := time.Now() err = veritySignature(ctx, rTask.GetObjectInfo().Id.Uint64(), integrity, rTask.GetObjectInfo().GetChecksums()[replicateIdx+1], approval.GetApprovedSpOperatorAddress(), approval.GetApprovedSpApprovalAddress(), signature) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_verity_seal_signature_time").Observe(time.Since(veritySignatureTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_verity_seal_signature_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed verify secondary signature", "endpoint", approval.GetApprovedSpEndpoint(), diff --git a/modular/executor/executor.go b/modular/executor/executor.go index 05cf3867f..1251b8430 100644 --- a/modular/executor/executor.go +++ b/modular/executor/executor.go @@ -132,6 +132,7 @@ func (e *ExecuteModular) AskTask(ctx context.Context, limit corercmgr.Limit) { metrics.ExecutorReplicatePieceTaskCounter.WithLabelValues(e.Name()).Inc() atomic.AddInt64(&e.doingReplicatePieceTaskCnt, 1) defer atomic.AddInt64(&e.doingReplicatePieceTaskCnt, -1) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_schedule_replicate_time").Observe(time.Since(time.Unix(t.GetCreateTime(), 0)).Seconds()) e.HandleReplicatePieceTask(ctx, t) case *gfsptask.GfSpSealObjectTask: metrics.ExecutorSealObjectTaskCounter.WithLabelValues(e.Name()).Inc() From d558a8854a9812bd6bd02512fcdd437479c337c7 Mon Sep 17 00:00:00 2001 From: joeycli Date: Thu, 15 Jun 2023 15:01:41 +0800 Subject: [PATCH 11/78] feat: opt executor goroutine model (#604) * feat: opt executor goroutine model * fix: reject fail log --- base/gnfd/gnfd_service.go | 3 +- core/module/modular.go | 2 +- core/module/null_modular.go | 2 +- modular/executor/execute_replicate.go | 24 ++++----- modular/executor/executor.go | 71 ++++++++++++++++----------- modular/executor/executor_options.go | 6 ++- modular/manager/manager.go | 5 +- 7 files changed, 65 insertions(+), 48 deletions(-) diff 
--git a/base/gnfd/gnfd_service.go b/base/gnfd/gnfd_service.go index e57f14301..ca36dd9bf 100644 --- a/base/gnfd/gnfd_service.go +++ b/base/gnfd/gnfd_service.go @@ -178,15 +178,16 @@ func (g *Gnfd) ListenObjectSeal(ctx context.Context, objectID uint64, timeoutHei err error ) for i := 0; i < timeoutHeight; i++ { - time.Sleep(ExpectedOutputBlockInternal * time.Second) objectInfo, err = g.QueryObjectInfoByID(ctx, strconv.FormatUint(objectID, 10)) if err != nil { + time.Sleep(ExpectedOutputBlockInternal * time.Second) continue } if objectInfo.GetObjectStatus() == storagetypes.OBJECT_STATUS_SEALED { log.CtxDebugw(ctx, "succeed to listen object stat") return true, nil } + time.Sleep(ExpectedOutputBlockInternal * time.Second) } if err == nil { log.CtxErrorw(ctx, "seal object timeout", "object_id", objectID) diff --git a/core/module/modular.go b/core/module/modular.go index 9a238d64a..a0a095e82 100644 --- a/core/module/modular.go +++ b/core/module/modular.go @@ -130,7 +130,7 @@ type Downloader interface { type TaskExecutor interface { Modular // AskTask asks the task by remaining limit from manager modular. - AskTask(ctx context.Context, remaining rcmgr.Limit) + AskTask(ctx context.Context) error // HandleReplicatePieceTask handles the ReplicatePieceTask that is asked from // manager modular. HandleReplicatePieceTask(ctx context.Context, task task.ReplicatePieceTask) diff --git a/core/module/null_modular.go b/core/module/null_modular.go index c846483ae..2770d027f 100644 --- a/core/module/null_modular.go +++ b/core/module/null_modular.go @@ -151,7 +151,7 @@ func (*NilModular) PreChallengePiece(context.Context, task.ChallengePieceTask) e func (*NilModular) HandleChallengePiece(context.Context, task.ChallengePieceTask) ([]byte, [][]byte, []byte, error) { return nil, nil, nil, ErrNilModular } -func (*NilModular) AskTask(context.Context, rcmgr.Limit) {} +func (*NilModular) AskTask(context.Context) error { return nil } func (*NilModular) PostChallengePiece(context.Context, task.ChallengePieceTask) {} func (*NilModular) ReportTask(context.Context, task.Task) error { return ErrNilModular } func (*NilModular) HandleReplicatePieceTask(context.Context, task.ReplicatePieceTask) {} diff --git a/modular/executor/execute_replicate.go b/modular/executor/execute_replicate.go index 65f3bc20d..c012da160 100644 --- a/modular/executor/execute_replicate.go +++ b/modular/executor/execute_replicate.go @@ -46,7 +46,7 @@ func (e *ExecuteModular) HandleReplicatePieceTask(ctx context.Context, task core approvals, err = e.AskReplicatePieceApproval(ctx, rAppTask, int(low), int(high), e.askReplicateApprovalTimeout) metrics.PerfUploadTimeHistogram.WithLabelValues("background_ask_p2p_approval_time").Observe(time.Since(askReplicateApprovalTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_p2p_end_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_p2p_end_time").Observe(time.Since(startReplicateTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed get approvals", "error", err) return @@ -54,7 +54,7 @@ func (e *ExecuteModular) HandleReplicatePieceTask(ctx context.Context, task core replicatePieceTotalTime := time.Now() err = e.handleReplicatePiece(ctx, task, approvals) metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_object_time").Observe(time.Since(replicatePieceTotalTime).Seconds()) - 
metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_replicate_object_end_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_replicate_object_end_time").Observe(time.Since(startReplicateTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to replicate piece", "error", err) return @@ -71,7 +71,7 @@ func (e *ExecuteModular) HandleReplicatePieceTask(ctx context.Context, task core sealTime := time.Now() sealErr := e.sealObject(ctx, task, sealMsg) metrics.PerfUploadTimeHistogram.WithLabelValues("background_seal_object_time").Observe(time.Since(sealTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_seal_object_end_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_seal_object_end_time").Observe(time.Since(startReplicateTime).Seconds()) if sealErr == nil { task.SetSealed(true) } @@ -89,7 +89,7 @@ func (e *ExecuteModular) AskReplicatePieceApproval(ctx context.Context, task cor p2pTime := time.Now() approvals, err = e.baseApp.GfSpClient().AskSecondaryReplicatePieceApproval(ctx, task, low, high, timeout) metrics.PerfUploadTimeHistogram.WithLabelValues("background_p2p_protocol_time").Observe(time.Since(p2pTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_p2p_protocol_end_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_p2p_protocol_end_time").Observe(time.Since(p2pTime).Seconds()) if err != nil { return nil, err } @@ -110,7 +110,7 @@ func (e *ExecuteModular) AskReplicatePieceApproval(ctx context.Context, task cor approval.SetApprovedSpApprovalAddress(spInfo.GetApprovalAddress()) } metrics.PerfUploadTimeHistogram.WithLabelValues("background_sp_db_time").Observe(time.Since(spDBTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_sp_db_end_time").Observe(time.Since(time.Unix(task.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_sp_db_end_time").Observe(time.Since(p2pTime).Seconds()) if len(approvals) < low { log.CtxErrorw(ctx, "failed to get sufficient sp info from db") return nil, ErrGfSpDB @@ -225,11 +225,11 @@ func (e *ExecuteModular) handleReplicatePiece(ctx context.Context, rTask coretas } } metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_all_piece_time").Observe(time.Since(pieceTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_all_piece_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_all_piece_end_time").Observe(time.Since(pieceTime).Seconds()) doneTime := time.Now() doneReplicate() metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_replicate_time").Observe(time.Since(doneTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_replicate_piece_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_replicate_piece_end_time").Observe(time.Since(pieceTime).Seconds()) } } @@ -250,7 +250,7 @@ func (e *ExecuteModular) doReplicatePiece(ctx context.Context, waitGroup *sync.W signTime := time.Now() signature, err = e.baseApp.GfSpClient().SignReceiveTask(ctx, receive) 
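	// Note (not part of the patch): before this change every "*_end_time"
	// series here measured elapsed time since the task's create time
	// (time.Unix(task.GetCreateTime(), 0)); after it they measure from a
	// local start such as startTime or signTime, so time spent queued
	// before the executor picked the task up is no longer included.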
metrics.PerfUploadTimeHistogram.WithLabelValues("background_sign_receive_time").Observe(time.Since(signTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_sign_receive_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_sign_receive_end_time").Observe(time.Since(startTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to sign receive task", "replicate_idx", replicateIdx, "piece_idx", pieceIdx, "error", err) @@ -261,7 +261,7 @@ func (e *ExecuteModular) doReplicatePiece(ctx context.Context, waitGroup *sync.W err = e.baseApp.GfSpClient().ReplicatePieceToSecondary(ctx, approval.GetApprovedSpEndpoint(), approval, receive, data) metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_one_piece_time").Observe(time.Since(replicateOnePieceTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_one_piece_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_one_piece_end_time").Observe(time.Since(startTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to replicate piece", "replicate_idx", replicateIdx, "piece_idx", pieceIdx, "error", err) @@ -286,7 +286,7 @@ func (e *ExecuteModular) doneReplicatePiece(ctx context.Context, rTask coretask. signTime := time.Now() taskSignature, err = e.baseApp.GfSpClient().SignReceiveTask(ctx, receive) metrics.PerfUploadTimeHistogram.WithLabelValues("background_sign_receive_time").Observe(time.Since(signTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_sign_receive_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_sign_receive_end_time").Observe(time.Since(signTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to sign done receive task", "replicate_idx", replicateIdx, "error", err) @@ -297,7 +297,7 @@ func (e *ExecuteModular) doneReplicatePiece(ctx context.Context, rTask coretask. integrity, signature, err = e.baseApp.GfSpClient().DoneReplicatePieceToSecondary(ctx, approval.GetApprovedSpEndpoint(), approval, receive) metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_receive_http_time").Observe(time.Since(doneReplicateTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_receive_http_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_receive_http_end_time").Observe(time.Since(signTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed to done replicate piece", "endpoint", approval.GetApprovedSpEndpoint(), @@ -316,7 +316,7 @@ func (e *ExecuteModular) doneReplicatePiece(ctx context.Context, rTask coretask. 
approval.GetApprovedSpOperatorAddress(), approval.GetApprovedSpApprovalAddress(), signature) metrics.PerfUploadTimeHistogram.WithLabelValues("background_verity_seal_signature_time").Observe(time.Since(veritySignatureTime).Seconds()) - metrics.PerfUploadTimeHistogram.WithLabelValues("background_verity_seal_signature_end_time").Observe(time.Since(time.Unix(rTask.GetCreateTime(), 0)).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("background_verity_seal_signature_end_time").Observe(time.Since(signTime).Seconds()) if err != nil { log.CtxErrorw(ctx, "failed verify secondary signature", "endpoint", approval.GetApprovedSpEndpoint(), diff --git a/modular/executor/executor.go b/modular/executor/executor.go index 1251b8430..25e742bdf 100644 --- a/modular/executor/executor.go +++ b/modular/executor/executor.go @@ -58,7 +58,22 @@ func (e *ExecuteModular) Start(ctx context.Context) error { } func (e *ExecuteModular) eventLoop(ctx context.Context) { - askTaskTicker := time.NewTicker(time.Duration(e.askTaskInterval) * time.Second) + for i := int64(0); i < e.maxExecuteNum; i++ { + go func(ctx context.Context) { + for { + select { + case <-ctx.Done(): + default: + err := e.AskTask(ctx) + if err != nil { + log.CtxErrorw(ctx, "failed to handle ask task, will hold on", "error", err) + time.Sleep(time.Duration(DefaultSleepInterval) * time.Millisecond) + } + } + } + }(ctx) + } + statisticsTicker := time.NewTicker(time.Duration(e.statisticsOutputInterval) * time.Second) for { select { @@ -66,30 +81,6 @@ func (e *ExecuteModular) eventLoop(ctx context.Context) { return case <-statisticsTicker.C: log.CtxInfo(ctx, e.Statistics()) - case <-askTaskTicker.C: - metrics.MaxTaskNumberGauge.WithLabelValues(e.Name()).Set(float64(atomic.LoadInt64(&e.maxExecuteNum))) - metrics.RunningTaskNumberGauge.WithLabelValues(e.Name()).Set(float64(atomic.LoadInt64(&e.executingNum))) - go func() { - defer atomic.AddInt64(&e.executingNum, -1) - if atomic.AddInt64(&e.executingNum, 1) > atomic.LoadInt64(&e.maxExecuteNum) { - log.CtxErrorw(ctx, "failed to ask due to asking number greater than max limit number") - return - } - limit, err := e.scope.RemainingResource() - if err != nil { - log.CtxErrorw(ctx, "failed to get remaining resource", "error", err) - return - } - metrics.RemainingMemoryGauge.WithLabelValues(e.Name()).Set(float64(limit.GetMemoryLimit())) - metrics.RemainingTaskGauge.WithLabelValues(e.Name()).Set(float64(limit.GetTaskTotalLimit())) - metrics.RemainingHighPriorityTaskGauge.WithLabelValues(e.Name()).Set( - float64(limit.GetTaskLimit(corercmgr.ReserveTaskPriorityHigh))) - metrics.RemainingMediumPriorityTaskGauge.WithLabelValues(e.Name()).Set( - float64(limit.GetTaskLimit(corercmgr.ReserveTaskPriorityMedium))) - metrics.RemainingLowTaskGauge.WithLabelValues(e.Name()).Set( - float64(limit.GetTaskLimit(corercmgr.ReserveTaskPriorityLow))) - e.AskTask(ctx, limit) - }() } } } @@ -104,25 +95,44 @@ func (e *ExecuteModular) omitError(err error) bool { return false } -func (e *ExecuteModular) AskTask(ctx context.Context, limit corercmgr.Limit) { +func (e *ExecuteModular) AskTask(ctx context.Context) error { + atomic.AddInt64(&e.executingNum, 1) + defer atomic.AddInt64(&e.executingNum, -1) + + limit, err := e.scope.RemainingResource() + if err != nil { + log.CtxErrorw(ctx, "failed to get remaining resource", "error", err) + return err + } + + metrics.RemainingMemoryGauge.WithLabelValues(e.Name()).Set(float64(limit.GetMemoryLimit())) + 
metrics.RemainingTaskGauge.WithLabelValues(e.Name()).Set(float64(limit.GetTaskTotalLimit()))
+	metrics.RemainingHighPriorityTaskGauge.WithLabelValues(e.Name()).Set(
+		float64(limit.GetTaskLimit(corercmgr.ReserveTaskPriorityHigh)))
+	metrics.RemainingMediumPriorityTaskGauge.WithLabelValues(e.Name()).Set(
+		float64(limit.GetTaskLimit(corercmgr.ReserveTaskPriorityMedium)))
+	metrics.RemainingLowTaskGauge.WithLabelValues(e.Name()).Set(
+		float64(limit.GetTaskLimit(corercmgr.ReserveTaskPriorityLow)))
+
 	askTask, err := e.baseApp.GfSpClient().AskTask(ctx, limit)
 	if err != nil {
 		if e.omitError(err) {
-			return
+			return err
 		}
 		log.CtxErrorw(ctx, "failed to ask task", "remaining", limit.String(), "error", err)
-		return
+		return err
 	}
 	// double confirm the safe task
 	if askTask == nil {
 		log.CtxErrorw(ctx, "failed to ask task due to dangling pointer",
 			"remaining", limit.String(), "error", err)
-		return
+		return ErrDanglingPointer
 	}
 	span, err := e.ReserveResource(ctx, askTask.EstimateLimit().ScopeStat())
 	if err != nil {
 		log.CtxErrorw(ctx, "failed to reserve resource", "task_require",
 			askTask.EstimateLimit().String(), "remaining", limit.String(), "error", err)
+		return err
 	}
 	defer e.ReleaseResource(ctx, span)
 	defer e.ReportTask(ctx, askTask)
@@ -163,6 +173,7 @@ func (e *ExecuteModular) AskTask(ctx context.Context, limit corercmgr.Limit) {
 		log.CtxErrorw(ctx, "unsupported task type")
 	}
 	log.CtxDebugw(ctx, "finish to handle task")
+	return nil
 }
 
 func (e *ExecuteModular) ReportTask(
@@ -202,7 +213,7 @@ func (e *ExecuteModular) ReleaseResource(
 func (e *ExecuteModular) Statistics() string {
 	return fmt.Sprintf(
 		"maxAsk[%d], asking[%d], replicate[%d], seal[%d], receive[%d], gcObject[%d], gcZombie[%d], gcMeta[%d]",
-		atomic.LoadInt64(&e.maxExecuteNum), atomic.LoadInt64(&e.executingNum),
+		e.maxExecuteNum, atomic.LoadInt64(&e.executingNum),
 		atomic.LoadInt64(&e.doingReplicatePieceTaskCnt),
 		atomic.LoadInt64(&e.doingSpSealObjectTaskCnt),
 		atomic.LoadInt64(&e.doingReceivePieceTaskCnt),
diff --git a/modular/executor/executor_options.go b/modular/executor/executor_options.go
index f37fb4dd1..7f696b718 100644
--- a/modular/executor/executor_options.go
+++ b/modular/executor/executor_options.go
@@ -8,7 +8,7 @@ import (
 const (
 	// DefaultExecutorMaxExecuteNum defines the default max parallel execute task number.
-	DefaultExecutorMaxExecuteNum int64 = 1024
+	DefaultExecutorMaxExecuteNum int64 = 16
 	// DefaultExecutorAskTaskInterval defines the default ask task interval from manager.
 	DefaultExecutorAskTaskInterval int = 1
 	// DefaultExecutorAskReplicateApprovalTimeout defines the ask replicate piece approval
@@ -34,6 +34,9 @@ const (
 	// DefaultStatisticsOutputInterval defines the default interval for output statistics info,
 	// it is used to log and debug.
 	DefaultStatisticsOutputInterval int = 60
+	// DefaultSleepInterval defines the sleep interval after failing to ask task,
+	// it is in milliseconds
+	DefaultSleepInterval = 100
 )
 
 func NewExecuteModular(app *gfspapp.GfSpBaseApp, cfg *gfspconfig.GfSpConfig) (coremodule.Modular, error) {
@@ -46,6 +49,7 @@ func NewExecuteModular(app *gfspapp.GfSpBaseApp, cfg *gfspconfig.GfSpConfig) (co
 func DefaultExecutorOptions(executor *ExecuteModular, cfg *gfspconfig.GfSpConfig) error {
 	if cfg.Executor.MaxExecuteNumber == 0 {
+		// TODO:: DefaultExecutorMaxExecuteNum should be core_num * multiple; the core_num should be compatible with docker
 		cfg.Executor.MaxExecuteNumber = DefaultExecutorMaxExecuteNum
 	}
 	executor.maxExecuteNum = cfg.Executor.MaxExecuteNumber
diff --git a/modular/manager/manager.go b/modular/manager/manager.go
index 3b5a7633a..b84453239 100644
--- a/modular/manager/manager.go
+++ b/modular/manager/manager.go
@@ -469,12 +469,13 @@ func (m *ManageModular) RejectUnSealObject(ctx context.Context, object *storaget
 	for i := 0; i < RejectUnSealObjectRetry; i++ {
 		err = m.baseApp.GfSpClient().RejectUnSealObject(ctx, rejectUnSealObjectMsg)
 		if err != nil {
-			log.CtxErrorw(ctx, "failed to reject unseal object", "retry", i, "error", err)
 			time.Sleep(RejectUnSealObjectTimeout * time.Second)
 		} else {
-			break
+			log.CtxDebugw(ctx, "succeed to reject unseal object")
+			return nil
 		}
 	}
+	log.CtxErrorw(ctx, "failed to reject unseal object", "error", err)
 	return err
 }

From 45ed05a2f4718d405ca174211d72b59d7e5f602d Mon Sep 17 00:00:00 2001
From: VM <112189277+sysvm@users.noreply.github.com>
Date: Thu, 15 Jun 2023 16:26:48 +0800
Subject: [PATCH 12/78] fix: fix variable names for standards (#520)

* fix: fix config name for standards
* fix: fix operateAddress to operatorAddress
* fix: fix chainID in env.info
* fix: fix gateway config name

---------

Co-authored-by: VM
---
 Makefile                              |  2 +-
 base/gfspapp/app.go                   | 18 +++++-----
 base/gfspapp/app_options.go           | 52 +++++++++++++--------------
 base/gfspapp/grpc_server.go           |  6 ++--
 base/gfspapp/manage_server.go         | 20 +++++------
 base/gfspconfig/config.go             | 12 +++----
 cmd/command/query.go                  |  6 ++--
 cmd/utils/init_env.go                 | 26 +++++++-------
 deployment/localup/env.info           |  2 +-
 deployment/localup/localup.sh         | 12 +++----
 modular/authorizer/authorizer.go      | 22 ++++++------
 modular/executor/execute_replicate.go |  4 +--
 modular/executor/execute_task.go      | 10 +++---
 modular/gater/admin_handler.go        |  4 +--
 modular/gater/auth_handler.go         |  2 +-
 modular/gater/gater_options.go        | 18 +++++-----
 modular/gater/object_handler.go       |  5 ++-
 modular/manager/manager.go            |  6 ++--
 modular/p2p/p2pnode/approval.go       |  6 ++--
 modular/p2p/p2pnode/node.go           |  4 +--
 modular/p2p/p2pnode/ping.go           |  2 +-
 21 files changed, 119 insertions(+), 120 deletions(-)

diff --git a/Makefile b/Makefile
index 43f91c0f2..7fbb8e1a1 100644
--- a/Makefile
+++ b/Makefile
@@ -47,7 +47,7 @@ test:
 	# go test -cover ./...
clean: - rm -rf ./service/types/*.pb.go && rm -rf ./service/*/types/*.pb.go + rm -rf ./base/types/*/*.pb.go && rm -rf ./modular/metadata/types/*.pb.go && rm -rf ./store/types/*.pb.go lint: golangci-lint run --fix diff --git a/base/gfspapp/app.go b/base/gfspapp/app.go index 8eea43ac0..29bae3034 100644 --- a/base/gfspapp/app.go +++ b/base/gfspapp/app.go @@ -21,10 +21,10 @@ const ( ) type GfSpBaseApp struct { - appID string - grpcAddress string - operateAddress string - chainID string + appID string + grpcAddress string + operatorAddress string + chainID string server *grpc.Server client *gfspclient.GfSpClient @@ -99,9 +99,9 @@ func (g *GfSpBaseApp) Consensus() consensus.Consensus { return g.chain } -// OperateAddress returns the sp operator address. -func (g *GfSpBaseApp) OperateAddress() string { - return g.operateAddress +// OperatorAddress returns the sp operator address. +func (g *GfSpBaseApp) OperatorAddress() string { + return g.operatorAddress } // ChainID returns the chain ID used by this sp instance @@ -147,7 +147,7 @@ func (g *GfSpBaseApp) ResourceManager() corercmgr.ResourceManager { // Start the GfSpBaseApp and blocks the progress until signal. func (g *GfSpBaseApp) Start(ctx context.Context) error { - err := g.StartRpcServer(ctx) + err := g.StartRPCServer(ctx) if err != nil { return err } @@ -160,7 +160,7 @@ func (g *GfSpBaseApp) Start(ctx context.Context) error { // close recycles the GfSpBaseApp resource on the stop time. func (g *GfSpBaseApp) close(ctx context.Context) error { - g.StopRpcServer(ctx) + g.StopRPCServer(ctx) g.GfSpClient().Close() g.rcmgr.Close() g.chain.Close() diff --git a/base/gfspapp/app_options.go b/base/gfspapp/app_options.go index fc9e0c39b..1f3ff751d 100644 --- a/base/gfspapp/app_options.go +++ b/base/gfspapp/app_options.go @@ -26,15 +26,15 @@ import ( const ( // DefaultGfSpAppIDPrefix defines the default app id prefix. DefaultGfSpAppIDPrefix = "gfsp" - // DefaultGrpcAddress defines the default Grpc address. - DefaultGrpcAddress = "localhost:9333" + // DefaultGRPCAddress defines the default gRPC address. + DefaultGRPCAddress = "localhost:9333" // DefaultMetricsAddress defines the default metrics service address. DefaultMetricsAddress = "localhost:24367" - // DefaultPprofAddress defines the default pprof service address. - DefaultPprofAddress = "localhost:24368" + // DefaultPProfAddress defines the default pprof service address. + DefaultPProfAddress = "localhost:24368" - // DefaultChainID defines the default greenfield chain ID. - DefaultChainID = "greenfield_9000-1741" + // DefaultChainID defines the default greenfield chainID. + DefaultChainID = "greenfield_9000-121" // DefaultChainAddress defines the default greenfield address. 
DefaultChainAddress = "http://localhost:26750" @@ -59,11 +59,11 @@ func DefaultStaticOption(app *GfSpBaseApp, cfg *gfspconfig.GfSpConfig) error { cfg.AppID = DefaultGfSpAppIDPrefix + "-" + servers } app.appID = cfg.AppID - if cfg.GrpcAddress == "" { - cfg.GrpcAddress = DefaultGrpcAddress + if cfg.GRPCAddress == "" { + cfg.GRPCAddress = DefaultGRPCAddress } - app.grpcAddress = cfg.GrpcAddress - app.operateAddress = cfg.SpAccount.SpOperateAddress + app.grpcAddress = cfg.GRPCAddress + app.operatorAddress = cfg.SpAccount.SpOperatorAddress app.chainID = cfg.Chain.ChainID app.uploadSpeed = cfg.Task.UploadTaskSpeed app.downloadSpeed = cfg.Task.DownloadTaskSpeed @@ -96,34 +96,34 @@ func DefaultStaticOption(app *GfSpBaseApp, cfg *gfspconfig.GfSpConfig) error { func DefaultGfSpClientOption(app *GfSpBaseApp, cfg *gfspconfig.GfSpConfig) error { if cfg.Endpoint.ApproverEndpoint == "" { - cfg.Endpoint.ApproverEndpoint = cfg.GrpcAddress + cfg.Endpoint.ApproverEndpoint = cfg.GRPCAddress } if cfg.Endpoint.ManagerEndpoint == "" { - cfg.Endpoint.ManagerEndpoint = cfg.GrpcAddress + cfg.Endpoint.ManagerEndpoint = cfg.GRPCAddress } if cfg.Endpoint.DownloaderEndpoint == "" { - cfg.Endpoint.DownloaderEndpoint = cfg.GrpcAddress + cfg.Endpoint.DownloaderEndpoint = cfg.GRPCAddress } if cfg.Endpoint.ReceiverEndpoint == "" { - cfg.Endpoint.ReceiverEndpoint = cfg.GrpcAddress + cfg.Endpoint.ReceiverEndpoint = cfg.GRPCAddress } if cfg.Endpoint.MetadataEndpoint == "" { - cfg.Endpoint.MetadataEndpoint = cfg.GrpcAddress + cfg.Endpoint.MetadataEndpoint = cfg.GRPCAddress } if cfg.Endpoint.MetadataEndpoint == "" { - cfg.Endpoint.MetadataEndpoint = cfg.GrpcAddress + cfg.Endpoint.MetadataEndpoint = cfg.GRPCAddress } if cfg.Endpoint.UploaderEndpoint == "" { - cfg.Endpoint.UploaderEndpoint = cfg.GrpcAddress + cfg.Endpoint.UploaderEndpoint = cfg.GRPCAddress } if cfg.Endpoint.P2PEndpoint == "" { - cfg.Endpoint.P2PEndpoint = cfg.GrpcAddress + cfg.Endpoint.P2PEndpoint = cfg.GRPCAddress } if cfg.Endpoint.SignerEndpoint == "" { - cfg.Endpoint.SignerEndpoint = cfg.GrpcAddress + cfg.Endpoint.SignerEndpoint = cfg.GRPCAddress } if cfg.Endpoint.AuthorizerEndpoint == "" { - cfg.Endpoint.AuthorizerEndpoint = cfg.GrpcAddress + cfg.Endpoint.AuthorizerEndpoint = cfg.GRPCAddress } app.client = gfspclient.NewGfSpClient( cfg.Endpoint.ApproverEndpoint, @@ -407,10 +407,10 @@ func DefaultGfSpMetricOption(app *GfSpBaseApp, cfg *gfspconfig.GfSpConfig) error if cfg.Monitor.DisableMetrics { app.metrics = &coremodule.NullModular{} } - if cfg.Monitor.MetricsHttpAddress == "" { - cfg.Monitor.MetricsHttpAddress = DefaultMetricsAddress + if cfg.Monitor.MetricsHTTPAddress == "" { + cfg.Monitor.MetricsHTTPAddress = DefaultMetricsAddress } - app.metrics = metrics.NewMetrics(cfg.Monitor.MetricsHttpAddress) + app.metrics = metrics.NewMetrics(cfg.Monitor.MetricsHTTPAddress) app.RegisterServices(app.metrics) return nil } @@ -419,10 +419,10 @@ func DefaultGfSpPprofOption(app *GfSpBaseApp, cfg *gfspconfig.GfSpConfig) error if cfg.Monitor.DisablePProf { app.pprof = &coremodule.NullModular{} } - if cfg.Monitor.PProfHttpAddress == "" { - cfg.Monitor.PProfHttpAddress = DefaultPprofAddress + if cfg.Monitor.PProfHTTPAddress == "" { + cfg.Monitor.PProfHTTPAddress = DefaultPProfAddress } - app.pprof = pprof.NewPProf(cfg.Monitor.PProfHttpAddress) + app.pprof = pprof.NewPProf(cfg.Monitor.PProfHTTPAddress) app.RegisterServices(app.pprof) return nil } diff --git a/base/gfspapp/grpc_server.go b/base/gfspapp/grpc_server.go index 7ade42eba..c610377b5 100644 --- 
a/base/gfspapp/grpc_server.go +++ b/base/gfspapp/grpc_server.go @@ -44,7 +44,7 @@ func (g *GfSpBaseApp) newRpcServer(options ...grpc.ServerOption) { reflection.Register(g.server) } -func (g *GfSpBaseApp) StartRpcServer(ctx context.Context) error { +func (g *GfSpBaseApp) StartRPCServer(ctx context.Context) error { lis, err := net.Listen("tcp", g.grpcAddress) if err != nil { log.Errorw("failed to listen tcp address", "address", g.grpcAddress, "error", err) @@ -58,12 +58,12 @@ func (g *GfSpBaseApp) StartRpcServer(ctx context.Context) error { return nil } -func (g *GfSpBaseApp) StopRpcServer(ctx context.Context) error { +func (g *GfSpBaseApp) StopRPCServer(ctx context.Context) error { g.server.GracefulStop() return nil } -func RpcRemoteAddress(ctx context.Context) string { +func GetRPCRemoteAddress(ctx context.Context) string { var addr string if pr, ok := peer.FromContext(ctx); ok { if tcpAddr, ok := pr.Addr.(*net.TCPAddr); ok { diff --git a/base/gfspapp/manage_server.go b/base/gfspapp/manage_server.go index 5bc579333..8bf3f1d02 100644 --- a/base/gfspapp/manage_server.go +++ b/base/gfspapp/manage_server.go @@ -111,7 +111,7 @@ func (g *GfSpBaseApp) OnAskTask(ctx context.Context, limit corercmgr.Limit) (cor gfspTask.IncRetry() gfspTask.SetError(nil) gfspTask.SetUpdateTime(time.Now().Unix()) - gfspTask.SetAddress(RpcRemoteAddress(ctx)) + gfspTask.SetAddress(GetRPCRemoteAddress(ctx)) log.CtxDebugw(ctx, "succeed to dispatch task", "info", gfspTask.Info()) return gfspTask, nil } @@ -130,7 +130,7 @@ func (g *GfSpBaseApp) GfSpReportTask(ctx context.Context, req *gfspserver.GfSpRe case *gfspserver.GfSpReportTaskRequest_UploadObjectTask: task := t.UploadObjectTask ctx = log.WithValue(ctx, log.CtxKeyTask, task.Key().String()) - task.SetAddress(RpcRemoteAddress(ctx)) + task.SetAddress(GetRPCRemoteAddress(ctx)) log.CtxInfow(ctx, "begin to handle reported task", "task_info", task.Info()) metrics.UploadObjectTaskTimeHistogram.WithLabelValues(g.manager.Name()).Observe( @@ -146,7 +146,7 @@ func (g *GfSpBaseApp) GfSpReportTask(ctx context.Context, req *gfspserver.GfSpRe case *gfspserver.GfSpReportTaskRequest_ReplicatePieceTask: task := t.ReplicatePieceTask ctx = log.WithValue(ctx, log.CtxKeyTask, task.Key().String()) - task.SetAddress(RpcRemoteAddress(ctx)) + task.SetAddress(GetRPCRemoteAddress(ctx)) log.CtxInfow(ctx, "begin to handle reported task", "task_info", task.Info()) metrics.ReplicateAndSealTaskTimeHistogram.WithLabelValues(g.manager.Name()).Observe( @@ -162,7 +162,7 @@ func (g *GfSpBaseApp) GfSpReportTask(ctx context.Context, req *gfspserver.GfSpRe case *gfspserver.GfSpReportTaskRequest_SealObjectTask: task := t.SealObjectTask ctx = log.WithValue(ctx, log.CtxKeyTask, task.Key().String()) - task.SetAddress(RpcRemoteAddress(ctx)) + task.SetAddress(GetRPCRemoteAddress(ctx)) log.CtxInfow(ctx, "begin to handle reported task", "task_info", task.Info()) metrics.SealObjectTaskTimeHistogram.WithLabelValues(g.manager.Name()).Observe( @@ -175,7 +175,7 @@ func (g *GfSpBaseApp) GfSpReportTask(ctx context.Context, req *gfspserver.GfSpRe case *gfspserver.GfSpReportTaskRequest_ReceivePieceTask: task := t.ReceivePieceTask ctx = log.WithValue(ctx, log.CtxKeyTask, task.Key().String()) - task.SetAddress(RpcRemoteAddress(ctx)) + task.SetAddress(GetRPCRemoteAddress(ctx)) log.CtxInfow(ctx, "begin to handle reported task", "task_info", task.Info()) metrics.ReceiveTaskTimeHistogram.WithLabelValues(g.manager.Name()).Observe( @@ -188,35 +188,35 @@ func (g *GfSpBaseApp) GfSpReportTask(ctx context.Context, req *gfspserver.GfSpRe 
case *gfspserver.GfSpReportTaskRequest_GcObjectTask: task := t.GcObjectTask ctx = log.WithValue(ctx, log.CtxKeyTask, task.Key().String()) - task.SetAddress(RpcRemoteAddress(ctx)) + task.SetAddress(GetRPCRemoteAddress(ctx)) log.CtxInfow(ctx, "begin to handle reported task", "task_info", task.Info()) err = g.manager.HandleGCObjectTask(ctx, t.GcObjectTask) case *gfspserver.GfSpReportTaskRequest_GcZombiePieceTask: task := t.GcZombiePieceTask ctx = log.WithValue(ctx, log.CtxKeyTask, task.Key().String()) - task.SetAddress(RpcRemoteAddress(ctx)) + task.SetAddress(GetRPCRemoteAddress(ctx)) log.CtxInfow(ctx, "begin to handle reported task", "task_info", task.Info()) err = g.manager.HandleGCZombiePieceTask(ctx, t.GcZombiePieceTask) case *gfspserver.GfSpReportTaskRequest_GcMetaTask: task := t.GcMetaTask ctx = log.WithValue(ctx, log.CtxKeyTask, task.Key().String()) - task.SetAddress(RpcRemoteAddress(ctx)) + task.SetAddress(GetRPCRemoteAddress(ctx)) log.CtxInfow(ctx, "begin to handle reported task", "task_info", task.Info()) err = g.manager.HandleGCMetaTask(ctx, t.GcMetaTask) case *gfspserver.GfSpReportTaskRequest_DownloadObjectTask: task := t.DownloadObjectTask ctx = log.WithValue(ctx, log.CtxKeyTask, task.Key().String()) - task.SetAddress(RpcRemoteAddress(ctx)) + task.SetAddress(GetRPCRemoteAddress(ctx)) log.CtxInfow(ctx, "begin to handle reported task", "task_info", task.Info()) err = g.manager.HandleDownloadObjectTask(ctx, t.DownloadObjectTask) case *gfspserver.GfSpReportTaskRequest_ChallengePieceTask: task := t.ChallengePieceTask ctx = log.WithValue(ctx, log.CtxKeyTask, task.Key().String()) - task.SetAddress(RpcRemoteAddress(ctx)) + task.SetAddress(GetRPCRemoteAddress(ctx)) log.CtxInfow(ctx, "begin to handle reported task", "task_info", task.Info()) err = g.manager.HandleChallengePieceTask(ctx, t.ChallengePieceTask) diff --git a/base/gfspconfig/config.go b/base/gfspconfig/config.go index ad32712f8..b853f12b7 100644 --- a/base/gfspconfig/config.go +++ b/base/gfspconfig/config.go @@ -35,7 +35,7 @@ type Customize struct { type GfSpConfig struct { AppID string Server []string - GrpcAddress string + GRPCAddress string Customize *Customize SpDB storeconfig.SQLDBConfig BsDB storeconfig.SQLDBConfig @@ -90,7 +90,7 @@ type ChainConfig struct { } type SpAccountConfig struct { - SpOperateAddress string + SpOperatorAddress string OperatorPrivateKey string FundingPrivateKey string SealPrivateKey string @@ -124,8 +124,8 @@ type BucketConfig struct { } type GatewayConfig struct { - Domain string - HttpAddress string + DomainName string + HTTPAddress string } type ExecutorConfig struct { @@ -196,8 +196,8 @@ type TaskConfig struct { type MonitorConfig struct { DisableMetrics bool DisablePProf bool - MetricsHttpAddress string - PProfHttpAddress string + MetricsHTTPAddress string + PProfHTTPAddress string } type RcmgrConfig struct { diff --git a/cmd/command/query.go b/cmd/command/query.go index c220d0927..baaeb702e 100644 --- a/cmd/command/query.go +++ b/cmd/command/query.go @@ -141,7 +141,7 @@ func listErrorsAction(ctx *cli.Context) error { } func queryTasksAction(ctx *cli.Context) error { - endpoint := gfspapp.DefaultGrpcAddress + endpoint := gfspapp.DefaultGRPCAddress if ctx.IsSet(utils.ConfigFileFlag.Name) { cfg := &gfspconfig.GfSpConfig{} err := utils.LoadConfig(ctx.String(utils.ConfigFileFlag.Name), cfg) @@ -149,7 +149,7 @@ func queryTasksAction(ctx *cli.Context) error { log.Errorw("failed to load config file", "error", err) return err } - endpoint = cfg.GrpcAddress + endpoint = cfg.GRPCAddress } if 
ctx.IsSet(endpointFlag.Name) { endpoint = ctx.String(endpointFlag.Name) @@ -296,7 +296,7 @@ func getSegmentIntegrityAction(ctx *cli.Context) error { replicateIdx := -1 for i, addr := range objectInfo.GetSecondarySpAddresses() { - if strings.EqualFold(addr, cfg.SpAccount.SpOperateAddress) { + if strings.EqualFold(addr, cfg.SpAccount.SpOperatorAddress) { replicateIdx = i break } diff --git a/cmd/utils/init_env.go b/cmd/utils/init_env.go index d627a5655..8936f5070 100644 --- a/cmd/utils/init_env.go +++ b/cmd/utils/init_env.go @@ -35,13 +35,13 @@ func MakeConfig(ctx *cli.Context) (*gfspconfig.GfSpConfig, error) { cfg.Monitor.DisableMetrics = ctx.Bool(MetricsDisableFlag.Name) } if ctx.IsSet(utils.MetricsHTTPFlag.Name) { - cfg.Monitor.MetricsHttpAddress = ctx.String(utils.MetricsHTTPFlag.Name) + cfg.Monitor.MetricsHTTPAddress = ctx.String(utils.MetricsHTTPFlag.Name) } if ctx.IsSet(PProfDisableFlag.Name) { cfg.Monitor.DisablePProf = ctx.Bool(PProfDisableFlag.Name) } if ctx.IsSet(PProfHTTPFlag.Name) { - cfg.Monitor.PProfHttpAddress = ctx.String(PProfHTTPFlag.Name) + cfg.Monitor.PProfHTTPAddress = ctx.String(PProfHTTPFlag.Name) } if ctx.IsSet(DisableResourceManagerFlag.Name) { cfg.Rcmgr.DisableRcmgr = ctx.Bool(DisableResourceManagerFlag.Name) @@ -96,35 +96,35 @@ func initLog(ctx *cli.Context, cfg *gfspconfig.GfSpConfig) error { } func MakeGfSpClient(cfg *gfspconfig.GfSpConfig) *gfspclient.GfSpClient { - if len(cfg.GrpcAddress) == 0 { - cfg.GrpcAddress = gfspapp.DefaultGrpcAddress + if len(cfg.GRPCAddress) == 0 { + cfg.GRPCAddress = gfspapp.DefaultGRPCAddress } if len(cfg.Endpoint.ApproverEndpoint) == 0 { - cfg.Endpoint.ApproverEndpoint = cfg.GrpcAddress + cfg.Endpoint.ApproverEndpoint = cfg.GRPCAddress } if len(cfg.Endpoint.ManagerEndpoint) == 0 { - cfg.Endpoint.ManagerEndpoint = cfg.GrpcAddress + cfg.Endpoint.ManagerEndpoint = cfg.GRPCAddress } if len(cfg.Endpoint.DownloaderEndpoint) == 0 { - cfg.Endpoint.DownloaderEndpoint = cfg.GrpcAddress + cfg.Endpoint.DownloaderEndpoint = cfg.GRPCAddress } if len(cfg.Endpoint.ReceiverEndpoint) == 0 { - cfg.Endpoint.ReceiverEndpoint = cfg.GrpcAddress + cfg.Endpoint.ReceiverEndpoint = cfg.GRPCAddress } if len(cfg.Endpoint.MetadataEndpoint) == 0 { - cfg.Endpoint.MetadataEndpoint = cfg.GrpcAddress + cfg.Endpoint.MetadataEndpoint = cfg.GRPCAddress } if len(cfg.Endpoint.UploaderEndpoint) == 0 { - cfg.Endpoint.UploaderEndpoint = cfg.GrpcAddress + cfg.Endpoint.UploaderEndpoint = cfg.GRPCAddress } if len(cfg.Endpoint.P2PEndpoint) == 0 { - cfg.Endpoint.P2PEndpoint = cfg.GrpcAddress + cfg.Endpoint.P2PEndpoint = cfg.GRPCAddress } if len(cfg.Endpoint.SignerEndpoint) == 0 { - cfg.Endpoint.SignerEndpoint = cfg.GrpcAddress + cfg.Endpoint.SignerEndpoint = cfg.GRPCAddress } if len(cfg.Endpoint.AuthorizerEndpoint) == 0 { - cfg.Endpoint.AuthorizerEndpoint = cfg.GrpcAddress + cfg.Endpoint.AuthorizerEndpoint = cfg.GRPCAddress } client := gfspclient.NewGfSpClient( cfg.Endpoint.ApproverEndpoint, diff --git a/deployment/localup/env.info b/deployment/localup/env.info index 679057037..0abf28c66 100644 --- a/deployment/localup/env.info +++ b/deployment/localup/env.info @@ -1,7 +1,7 @@ #!/usr/bin/env bash # chain config -CHAIN_ID="greenfield_9000-1741" +CHAIN_ID="greenfield_9000-121" CHAIN_GRPC_ENDPOINT="localhost:9090" CHAIN_HTTP_ENDPOINT="localhost:26750" diff --git a/deployment/localup/localup.sh b/deployment/localup/localup.sh index 757c10026..802c8de57 100644 --- a/deployment/localup/localup.sh +++ b/deployment/localup/localup.sh @@ -91,7 +91,7 @@ function make_config() { source 
db.info source sp.info # app - sed -i -e "s/GrpcAddress = '.*'/GrpcAddress = '127.0.0.1:${cur_port}'/g" config.toml + sed -i -e "s/GRPCAddress = '.*'/GRPCAddress = '127.0.0.1:${cur_port}'/g" config.toml # db sed -i -e "s/User = '.*'/User = '${USER}'/g" config.toml @@ -104,7 +104,7 @@ function make_config() { sed -i -e "s/ChainAddress = \[.*\]/ChainAddress = \['http:\/\/${CHAIN_HTTP_ENDPOINT}'\]/g" config.toml # sp account - sed -i -e "s/SpOperateAddress = '.*'/SpOperateAddress = '${OPERATOR_ADDRESS}'/g" config.toml + sed -i -e "s/SpOperatorAddress = '.*'/SpOperatorAddress = '${OPERATOR_ADDRESS}'/g" config.toml sed -i -e "s/OperatorPrivateKey = '.*'/OperatorPrivateKey = '${OPERATOR_PRIVATE_KEY}'/g" config.toml sed -i -e "s/FundingPrivateKey = '.*'/FundingPrivateKey = '${FUNDING_PRIVATE_KEY}'/g" config.toml sed -i -e "s/SealPrivateKey = '.*'/SealPrivateKey = '${SEAL_PRIVATE_KEY}'/g" config.toml @@ -112,8 +112,8 @@ function make_config() { sed -i -e "s/GcPrivateKey = '.*'/GcPrivateKey = '${GC_PRIVATE_KEY}'/g" config.toml # gateway - sed -i -e "s/Domain = '.*'/Domain = 'gnfd.test-sp.com'/g" config.toml - sed -i -e "s/^HttpAddress = '.*'/HttpAddress = '${SP_ENDPOINT}'/g" config.toml + sed -i -e "s/DomainName = '.*'/DomainName = 'gnfd.test-sp.com'/g" config.toml + sed -i -e "s/^HTTPAddress = '.*'/HTTPAddress = '${SP_ENDPOINT}'/g" config.toml # metadata sed -i -e "s/IsMasterDB = .*/IsMasterDB = true/g" config.toml @@ -134,9 +134,9 @@ function make_config() { sed -i -e "s/DisableMetrics = false/DisableMetrics = true/" config.toml sed -i -e "s/DisablePProf = false/DisablePProf = true/" config.toml metrics_address="127.0.0.1:"$((SP_START_PORT+1000*$index + 367)) - sed -i -e "s/MetricsHttpAddress = '.*'/MetricsHttpAddress = '${metrics_address}'/g" config.toml + sed -i -e "s/MetricsHTTPAddress = '.*'/MetricsHTTPAddress = '${metrics_address}'/g" config.toml pprof_address="127.0.0.1:"$((SP_START_PORT+1000*$index + 368)) - sed -i -e "s/PProfHttpAddress = '.*'/PProfHttpAddress = '${pprof_address}'/g" config.toml + sed -i -e "s/PProfHTTPAddress = '.*'/PProfHTTPAddress = '${pprof_address}'/g" config.toml # blocksyncer sed -i -e "s/Modules = \[\]/Modules = \[\'epoch\',\'bucket\',\'object\',\'payment\',\'group\',\'permission\',\'storage_provider\'\,\'prefix_tree\'\]/g" config.toml diff --git a/modular/authorizer/authorizer.go b/modular/authorizer/authorizer.go index 767464846..5667606d8 100644 --- a/modular/authorizer/authorizer.go +++ b/modular/authorizer/authorizer.go @@ -219,8 +219,8 @@ func (a *AuthorizeModular) VerifyAuthorize( } return false, ErrConsensus } - if bucketInfo.GetPrimarySpAddress() != a.baseApp.OperateAddress() { - log.CtxErrorw(ctx, "sp operator address mismatch", "current", a.baseApp.OperateAddress(), + if bucketInfo.GetPrimarySpAddress() != a.baseApp.OperatorAddress() { + log.CtxErrorw(ctx, "sp operator address mismatch", "current", a.baseApp.OperatorAddress(), "require", bucketInfo.GetPrimarySpAddress()) return false, ErrMismatchSp } @@ -251,8 +251,8 @@ func (a *AuthorizeModular) VerifyAuthorize( } return false, ErrConsensus } - if bucketInfo.GetPrimarySpAddress() != a.baseApp.OperateAddress() { - log.CtxErrorw(ctx, "sp operator address mismatch", "current", a.baseApp.OperateAddress(), + if bucketInfo.GetPrimarySpAddress() != a.baseApp.OperatorAddress() { + log.CtxErrorw(ctx, "sp operator address mismatch", "current", a.baseApp.OperatorAddress(), "require", bucketInfo.GetPrimarySpAddress()) return false, ErrMismatchSp } @@ -283,8 +283,8 @@ func (a *AuthorizeModular) VerifyAuthorize( } 
return false, ErrConsensus } - if bucketInfo.GetPrimarySpAddress() != a.baseApp.OperateAddress() { - log.CtxErrorw(ctx, "sp operator address mismatch", "current", a.baseApp.OperateAddress(), + if bucketInfo.GetPrimarySpAddress() != a.baseApp.OperatorAddress() { + log.CtxErrorw(ctx, "sp operator address mismatch", "current", a.baseApp.OperatorAddress(), "require", bucketInfo.GetPrimarySpAddress()) return false, ErrMismatchSp } @@ -323,8 +323,8 @@ func (a *AuthorizeModular) VerifyAuthorize( } return false, ErrConsensus } - if bucketInfo.GetPrimarySpAddress() != a.baseApp.OperateAddress() { - log.CtxErrorw(ctx, "sp operator address mismatch", "current", a.baseApp.OperateAddress(), + if bucketInfo.GetPrimarySpAddress() != a.baseApp.OperatorAddress() { + log.CtxErrorw(ctx, "sp operator address mismatch", "current", a.baseApp.OperatorAddress(), "require", bucketInfo.GetPrimarySpAddress()) return false, ErrMismatchSp } @@ -368,15 +368,15 @@ func (a *AuthorizeModular) VerifyAuthorize( } return false, ErrConsensus } - if strings.EqualFold(bucketInfo.GetPrimarySpAddress(), a.baseApp.OperateAddress()) { + if strings.EqualFold(bucketInfo.GetPrimarySpAddress(), a.baseApp.OperatorAddress()) { return true, nil } for _, address := range objectInfo.GetSecondarySpAddresses() { - if strings.EqualFold(address, a.baseApp.OperateAddress()) { + if strings.EqualFold(address, a.baseApp.OperatorAddress()) { return true, nil } } - log.CtxErrorw(ctx, "sp operator address mismatch", "current", a.baseApp.OperateAddress()) + log.CtxErrorw(ctx, "sp operator address mismatch", "current", a.baseApp.OperatorAddress()) return false, ErrMismatchSp default: return false, ErrUnsupportedAuthType diff --git a/modular/executor/execute_replicate.go b/modular/executor/execute_replicate.go index c012da160..4e529ba08 100644 --- a/modular/executor/execute_replicate.go +++ b/modular/executor/execute_replicate.go @@ -41,7 +41,7 @@ func (e *ExecuteModular) HandleReplicatePieceTask(ctx context.Context, task core high := math.Ceil(float64(low) * e.askReplicateApprovalExFactor) rAppTask := &gfsptask.GfSpReplicatePieceApprovalTask{} rAppTask.InitApprovalReplicatePieceTask(task.GetObjectInfo(), task.GetStorageParams(), - e.baseApp.TaskPriority(rAppTask), e.baseApp.OperateAddress()) + e.baseApp.TaskPriority(rAppTask), e.baseApp.OperatorAddress()) askReplicateApprovalTime := time.Now() approvals, err = e.AskReplicatePieceApproval(ctx, rAppTask, int(low), int(high), e.askReplicateApprovalTimeout) @@ -62,7 +62,7 @@ func (e *ExecuteModular) HandleReplicatePieceTask(ctx context.Context, task core log.CtxDebugw(ctx, "succeed to replicate all pieces") // combine seal object sealMsg := &storagetypes.MsgSealObject{ - Operator: e.baseApp.OperateAddress(), + Operator: e.baseApp.OperatorAddress(), BucketName: task.GetObjectInfo().GetBucketName(), ObjectName: task.GetObjectInfo().GetObjectName(), SecondarySpAddresses: task.GetSecondaryAddresses(), diff --git a/modular/executor/execute_task.go b/modular/executor/execute_task.go index 2e5979748..6d5f066c5 100644 --- a/modular/executor/execute_task.go +++ b/modular/executor/execute_task.go @@ -35,7 +35,7 @@ func (e *ExecuteModular) HandleSealObjectTask(ctx context.Context, task coretask return } sealMsg := &storagetypes.MsgSealObject{ - Operator: e.baseApp.OperateAddress(), + Operator: e.baseApp.OperatorAddress(), BucketName: task.GetObjectInfo().GetBucketName(), ObjectName: task.GetObjectInfo().GetObjectName(), SecondarySpAddresses: task.GetSecondaryAddresses(), @@ -128,10 +128,10 @@ func (e *ExecuteModular) 
HandleReceivePieceTask(ctx context.Context, task coreta task.SetError(ErrReplicateIdsOutOfBounds) return } - if onChainObject.GetSecondarySpAddresses()[int(task.GetReplicateIdx())] != e.baseApp.OperateAddress() { + if onChainObject.GetSecondarySpAddresses()[int(task.GetReplicateIdx())] != e.baseApp.OperatorAddress() { log.CtxErrorw(ctx, "failed to confirm receive task, secondary sp mismatch", "expect", onChainObject.GetSecondarySpAddresses()[int(task.GetReplicateIdx())], - "current", e.baseApp.OperateAddress()) + "current", e.baseApp.OperatorAddress()) task.SetError(ErrSecondaryMismatch) err = e.baseApp.GfSpDB().DeleteObjectIntegrity(task.GetObjectInfo().Id.Uint64()) if err != nil { @@ -194,7 +194,7 @@ func (e *ExecuteModular) HandleGCObjectTask(ctx context.Context, task coretask.G }() if waitingGCObjects, responseEndBlockID, err = e.baseApp.GfSpClient().ListDeletedObjectsByBlockNumberRange( - ctx, e.baseApp.OperateAddress(), task.GetStartBlockNumber(), + ctx, e.baseApp.OperatorAddress(), task.GetStartBlockNumber(), task.GetEndBlockNumber(), true); err != nil { log.CtxErrorw(ctx, "failed to query deleted object list", "task_info", task.Info(), "error", err) return @@ -236,7 +236,7 @@ func (e *ExecuteModular) HandleGCObjectTask(ctx context.Context, task coretask.G "object_info", objectInfo, "piece_key", pieceKey, "error", deleteErr) } for rIdx, address := range objectInfo.GetSecondarySpAddresses() { - if strings.Compare(e.baseApp.OperateAddress(), address) == 0 { + if strings.Compare(e.baseApp.OperatorAddress(), address) == 0 { for segIdx := uint32(0); segIdx < segmentCount; segIdx++ { pieceKey := e.baseApp.PieceOp().ECPieceKey(currentGCObjectID, segIdx, uint32(rIdx)) if objectInfo.GetRedundancyType() == storagetypes.REDUNDANCY_REPLICA_TYPE { diff --git a/modular/gater/admin_handler.go b/modular/gater/admin_handler.go index b5e9b23d7..5137f5e80 100644 --- a/modular/gater/admin_handler.go +++ b/modular/gater/admin_handler.go @@ -335,13 +335,13 @@ func (g *GateModular) replicateHandler(w http.ResponseWriter, r *http.Request) { err = ErrDecodeMsg return } - if approval.GetApprovedSpOperatorAddress() != g.baseApp.OperateAddress() { + if approval.GetApprovedSpOperatorAddress() != g.baseApp.OperatorAddress() { log.CtxErrorw(reqCtx.Context(), "failed to verify replicate piece approval, sp mismatch") err = ErrMismatchSp return } verifySignatureTime := time.Now() - err = p2pnode.VerifySignature(g.baseApp.OperateAddress(), approval.GetSignBytes(), approval.GetApprovedSignature()) + err = p2pnode.VerifySignature(g.baseApp.OperatorAddress(), approval.GetSignBytes(), approval.GetApprovedSignature()) metrics.PerfReceivePieceTimeHistogram.WithLabelValues("receive_piece_verify_approval_time").Observe(time.Since(verifySignatureTime).Seconds()) if err != nil { log.CtxErrorw(reqCtx.Context(), "failed to verify replicate piece approval signature") diff --git a/modular/gater/auth_handler.go b/modular/gater/auth_handler.go index c71af511b..bf3670889 100644 --- a/modular/gater/auth_handler.go +++ b/modular/gater/auth_handler.go @@ -263,7 +263,7 @@ func (g *GateModular) verifySignedContent(signedContent string, expectedDomain s spAddress := match[1] // spName := match[2] // keep this line here to indicate match[2] means spName spNonce := match[3] - if spAddress == g.baseApp.OperateAddress() { + if spAddress == g.baseApp.OperatorAddress() { found = true if expectedNonce != spNonce { // nonce doesn't match return ErrSignedMsgNotMatchTemplate diff --git a/modular/gater/gater_options.go 
b/modular/gater/gater_options.go index 146c9dbaa..af65756d0 100644 --- a/modular/gater/gater_options.go +++ b/modular/gater/gater_options.go @@ -9,9 +9,9 @@ import ( ) const ( - DefaultGatewayDomain = "localhost:9133" - DefaultMaxListReadQuota = 100 - DefaultMaxPayloadSize = 2 * 1024 * 1024 * 1024 + DefaultGatewayDomainName = "localhost:9133" + DefaultMaxListReadQuota = 100 + DefaultMaxPayloadSize = 2 * 1024 * 1024 * 1024 ) func NewGateModular(app *gfspapp.GfSpBaseApp, cfg *gfspconfig.GfSpConfig) (coremodule.Modular, error) { @@ -23,11 +23,11 @@ func NewGateModular(app *gfspapp.GfSpBaseApp, cfg *gfspconfig.GfSpConfig) (corem } func DefaultGaterOptions(gater *GateModular, cfg *gfspconfig.GfSpConfig) error { - if cfg.Gateway.Domain == "" { - cfg.Gateway.Domain = DefaultGatewayDomain + if cfg.Gateway.DomainName == "" { + cfg.Gateway.DomainName = DefaultGatewayDomainName } - if cfg.Gateway.HttpAddress == "" { - cfg.Gateway.HttpAddress = DefaultGatewayDomain + if cfg.Gateway.HTTPAddress == "" { + cfg.Gateway.HTTPAddress = DefaultGatewayDomainName } if cfg.Bucket.MaxListReadQuotaNumber == 0 { cfg.Bucket.MaxListReadQuotaNumber = DefaultMaxListReadQuota @@ -36,8 +36,8 @@ func DefaultGaterOptions(gater *GateModular, cfg *gfspconfig.GfSpConfig) error { cfg.Bucket.MaxPayloadSize = DefaultMaxPayloadSize } gater.maxPayloadSize = cfg.Bucket.MaxPayloadSize - gater.domain = cfg.Gateway.Domain - gater.httpAddress = cfg.Gateway.HttpAddress + gater.domain = cfg.Gateway.DomainName + gater.httpAddress = cfg.Gateway.HTTPAddress gater.maxListReadQuota = cfg.Bucket.MaxListReadQuotaNumber rateCfg := makeAPIRateLimitCfg(cfg.APIRateLimiter) if err := localhttp.NewAPILimiter(rateCfg); err != nil { diff --git a/modular/gater/object_handler.go b/modular/gater/object_handler.go index d58afe108..e22796286 100644 --- a/modular/gater/object_handler.go +++ b/modular/gater/object_handler.go @@ -396,11 +396,10 @@ func (g *GateModular) getObjectByUniversalEndpointHandler(w http.ResponseWriter, } bucketPrimarySpAddress := getBucketInfoRes.GetBucketInfo().GetPrimarySpAddress() - // if bucket not in the current sp, 302 redirect to the sp that contains the bucket - if !strings.EqualFold(bucketPrimarySpAddress, g.baseApp.OperateAddress()) { + if !strings.EqualFold(bucketPrimarySpAddress, g.baseApp.OperatorAddress()) { log.Debugw("primary sp address not matched ", - "bucketPrimarySpAddress", bucketPrimarySpAddress, "gateway.config.SpOperatorAddress", g.baseApp.OperateAddress(), + "bucketPrimarySpAddress", bucketPrimarySpAddress, "gateway.config.SpOperatorAddress", g.baseApp.OperatorAddress(), ) spEndpoint, getEndpointErr := g.baseApp.GfSpClient().GetEndpointBySpAddress(reqCtx.Context(), bucketPrimarySpAddress) diff --git a/modular/manager/manager.go b/modular/manager/manager.go index b84453239..2d0b73714 100644 --- a/modular/manager/manager.go +++ b/modular/manager/manager.go @@ -157,7 +157,7 @@ func (m *ManageModular) eventLoop(ctx context.Context) { continue } m.discontinueBuckets(ctx) - log.Infof("finish to discontinue buckets", "time", time.Now()) + log.Infow("finished to discontinue buckets", "time", time.Now()) } } } @@ -165,7 +165,7 @@ func (m *ManageModular) eventLoop(ctx context.Context) { func (m *ManageModular) discontinueBuckets(ctx context.Context) { createAt := time.Now().AddDate(0, 0, -m.discontinueBucketKeepAliveDays) buckets, err := m.baseApp.GfSpClient().ListExpiredBucketsBySp(context.Background(), - createAt.Unix(), m.baseApp.OperateAddress(), DiscontinueBucketLimit) + createAt.Unix(), m.baseApp.OperatorAddress(), 
DiscontinueBucketLimit) if err != nil { log.Errorw("failed to query expired buckets", "error", err) return @@ -449,7 +449,7 @@ func (m *ManageModular) syncConsensusInfo(ctx context.Context) { return } for _, sp := range spList { - if strings.EqualFold(m.baseApp.OperateAddress(), sp.OperatorAddress) { + if strings.EqualFold(m.baseApp.OperatorAddress(), sp.OperatorAddress) { if err = m.baseApp.GfSpDB().SetOwnSpInfo(sp); err != nil { log.Errorw("failed to set own sp info", "error", err) return diff --git a/modular/p2p/p2pnode/approval.go b/modular/p2p/p2pnode/approval.go index 9b2c0b934..8220258dd 100644 --- a/modular/p2p/p2pnode/approval.go +++ b/modular/p2p/p2pnode/approval.go @@ -120,7 +120,7 @@ func (a *ApprovalProtocol) onGetApprovalRequest(s network.Stream) { req.GetAskSpOperatorAddress(), "local", s.Conn().LocalPeer(), "remote", s.Conn().RemotePeer()) return } - if strings.Compare(req.GetAskSpOperatorAddress(), a.node.baseApp.OperateAddress()) == 0 { + if strings.Compare(req.GetAskSpOperatorAddress(), a.node.baseApp.OperatorAddress()) == 0 { log.CtxWarnw(ctx, "ignore self replicate piece approval request", "sp", req.GetAskSpOperatorAddress(), "local", s.Conn().LocalPeer(), "remote", s.Conn().RemotePeer()) return @@ -151,7 +151,7 @@ func (a *ApprovalProtocol) onGetApprovalRequest(s network.Stream) { return } req.SetApprovedSignature(signature) - req.SetApprovedSpOperatorAddress(a.node.baseApp.OperateAddress()) + req.SetApprovedSpOperatorAddress(a.node.baseApp.OperatorAddress()) err = a.node.sendToPeer(ctx, s.Conn().RemotePeer(), GetApprovalResponse, req) log.Infof("%s response to %s approval request, task_key: %s, error: %v", s.Conn().LocalPeer(), s.Conn().RemotePeer(), req.Key().String(), err) @@ -187,7 +187,7 @@ func (a *ApprovalProtocol) onGetApprovalResponse(s network.Stream) { "local", s.Conn().LocalPeer(), "remote", s.Conn().RemotePeer()) return } - if strings.Compare(resp.GetApprovedSpOperatorAddress(), a.node.baseApp.OperateAddress()) == 0 { + if strings.Compare(resp.GetApprovedSpOperatorAddress(), a.node.baseApp.OperatorAddress()) == 0 { log.CtxWarnw(ctx, "ignore self approval response", "sp", resp.GetApprovedSpOperatorAddress(), "local", s.Conn().LocalPeer(), "remote", s.Conn().RemotePeer()) return diff --git a/modular/p2p/p2pnode/node.go b/modular/p2p/p2pnode/node.go index c7dfb4d81..c4c43a4c3 100644 --- a/modular/p2p/p2pnode/node.go +++ b/modular/p2p/p2pnode/node.go @@ -187,7 +187,7 @@ func (n *Node) GetSecondaryReplicatePieceApproval( return } defer n.approval.cancelApprovalRequest(task.GetObjectInfo().Id.Uint64()) - task.SetAskSpOperatorAddress(n.baseApp.OperateAddress()) + task.SetAskSpOperatorAddress(n.baseApp.OperatorAddress()) signature, err := n.baseApp.GfSpClient().SignReplicatePieceApproval(ctx, task) if err != nil { log.CtxErrorw(ctx, "failed to sign replicate piece approval request") @@ -245,7 +245,7 @@ func (n *Node) eventLoop() { } ping := &gfspp2p.GfSpPing{ - SpOperatorAddress: n.baseApp.OperateAddress(), + SpOperatorAddress: n.baseApp.OperatorAddress(), } ctx := context.Background() sinagture, err := n.baseApp.GfSpClient().SignP2PPingMsg(ctx, ping) diff --git a/modular/p2p/p2pnode/ping.go b/modular/p2p/p2pnode/ping.go index 12930555f..6182709b5 100644 --- a/modular/p2p/p2pnode/ping.go +++ b/modular/p2p/p2pnode/ping.go @@ -84,7 +84,7 @@ func (n *Node) onPing(s network.Stream) { } } - pong.SpOperatorAddress = n.baseApp.OperateAddress() + pong.SpOperatorAddress = n.baseApp.OperatorAddress() signature, err := 
n.baseApp.GfSpClient().SignP2PPongMsg(context.Background(), pong)
 	if err != nil {
 		log.Errorw("failed to sign pong msg", "local", s.Conn().LocalPeer(), "remote", s.Conn().RemotePeer(), "error", err)

From 4c659e982eafbe91a4a7ab316f7d43856e5d7809 Mon Sep 17 00:00:00 2001
From: Raina <1007411869@qq.com>
Date: Fri, 16 Jun 2023 14:48:19 +0800
Subject: [PATCH 13/78] feat: add perf metrics (#610)

Co-authored-by: reneecok
---
 base/gfspclient/downloader.go   | 2 --
 modular/gater/object_handler.go | 6 ++++++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/base/gfspclient/downloader.go b/base/gfspclient/downloader.go
index f7892e3b0..d577266b4 100644
--- a/base/gfspclient/downloader.go
+++ b/base/gfspclient/downloader.go
@@ -46,9 +46,7 @@ func (s *GfSpClient) GetPiece(ctx context.Context, downloadPieceTask coretask.Do
 	req := &gfspserver.GfSpDownloadPieceRequest{
 		DownloadPieceTask: downloadPieceTask.(*gfsptask.GfSpDownloadPieceTask),
 	}
-	startTime := time.Now()
 	resp, err := gfspserver.NewGfSpDownloadServiceClient(conn).GfSpDownloadPiece(ctx, req)
-	metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_client_total_time").Observe(time.Since(startTime).Seconds())
 	if err != nil {
 		log.CtxErrorw(ctx, "client failed to download piece", "error", err)
 		return nil, ErrRpcUnknown
diff --git a/modular/gater/object_handler.go b/modular/gater/object_handler.go
index e22796286..276f3c487 100644
--- a/modular/gater/object_handler.go
+++ b/modular/gater/object_handler.go
@@ -160,12 +160,15 @@ func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) {
 	}()
 	reqCtx, reqCtxErr = NewRequestContext(r, g)
 	// check the object permission whether allow public read.
+	verifyObjectPermissionTime := time.Now()
 	if authorized, err = g.baseApp.Consensus().VerifyGetObjectPermission(reqCtx.Context(), sdk.AccAddress{}.String(),
 		reqCtx.bucketName, reqCtx.objectName); err != nil {
 		log.CtxErrorw(reqCtx.Context(), "failed to verify authorize for getting public object", "error", err)
 		err = ErrConsensus
 		return
 	}
+	metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_verify_object_permission_time").Observe(time.Since(verifyObjectPermissionTime).Seconds())
+
 	if !authorized {
 		if reqCtxErr != nil {
 			err = reqCtxErr
@@ -265,7 +268,10 @@ func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) {
 			log.CtxErrorw(reqCtx.Context(), "failed to download piece", "error", err)
 			return
 		}
+
+		writeTime := time.Now()
 		w.Write(pieceData)
+		metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_write_time").Observe(time.Since(writeTime).Seconds())
 	}
 	metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_get_data_time").Observe(time.Since(getDataTime).Seconds())
 }

From 7a133ab6679b53a1477aec3c35e9642ea5c775ff Mon Sep 17 00:00:00 2001
From: joeycli
Date: Sat, 17 Jun 2023 04:20:06 +0800
Subject: [PATCH 14/78] feat: add reject unseal object retry and confirm (#609)

* feat: add reject unseal object retry and confirm
* fix: manager filter callback bug
* fix: reject unseal nonce bug
* fix: async handle upload task
---
 base/gfspapp/task_options.go         |   2 +-
 base/gfsptqueue/queue.go             |  48 +++++---
 base/gfsptqueue/queue_limit.go       |  50 ++++++---
 base/gnfd/gnfd.go                    |   5 +-
 base/gnfd/gnfd_service.go            |  24 ++++
 base/types/gfsptask/task.go          |   3 -
 core/consensus/consensus.go          |   5 +
 modular/downloader/download_task.go  |   2 +-
 modular/executor/executor.go         |   6 +-
 modular/executor/executor_options.go |   2 +-
 modular/manager/manage_task.go       |   2 +-
 modular/manager/manager.go           |  68 +++++++----
modular/manager/manager_options.go | 4 + modular/signer/signer.go | 7 +- modular/signer/signer_client.go | 161 +++++++++++++++++---------- 15 files changed, 261 insertions(+), 128 deletions(-) diff --git a/base/gfspapp/task_options.go b/base/gfspapp/task_options.go index f184f3467..d3295fb5e 100644 --- a/base/gfspapp/task_options.go +++ b/base/gfspapp/task_options.go @@ -20,7 +20,7 @@ const ( // MaxReplicateTime defines the max timeout to replicate object. MaxReplicateTime int64 = 500 // MinReceiveTime defines the min timeout to confirm the received piece whether is sealed on greenfield. - MinReceiveTime int64 = 2 + MinReceiveTime int64 = 5 // MaxReceiveTime defines the max timeout to confirm the received piece whether is sealed on greenfield. MaxReceiveTime int64 = 10 // MinSealObjectTime defines the min timeout to seal object to greenfield. diff --git a/base/gfsptqueue/queue.go b/base/gfsptqueue/queue.go index 9222231a2..61ba91ffb 100644 --- a/base/gfsptqueue/queue.go +++ b/base/gfsptqueue/queue.go @@ -2,6 +2,7 @@ package gfsptqueue import ( "net/http" + "sort" "sync" "time" @@ -26,10 +27,11 @@ var _ taskqueue.TQueue = &GfSpTQueue{} var _ taskqueue.TQueueOnStrategy = &GfSpTQueue{} type GfSpTQueue struct { - name string - tasks map[coretask.TKey]coretask.Task - cap int - mux sync.RWMutex + name string + current int64 + tasks map[coretask.TKey]coretask.Task + cap int + mux sync.RWMutex gcFunc func(task2 coretask.Task) bool filterFunc func(task2 coretask.Task) bool @@ -163,27 +165,41 @@ func (t *GfSpTQueue) has(key coretask.TKey) bool { } func (t *GfSpTQueue) top() coretask.Task { - tasksCreateMap := make(map[int64]coretask.Task) - for _, task := range t.tasks { - tasksCreateMap[task.GetCreateTime()] = task + if len(t.tasks) == 0 { + return nil } - keys := maps.SortKeys(tasksCreateMap) - for _, key := range keys { - task := tasksCreateMap[key] + var backupTasks []coretask.Task + var gcTasks []coretask.Task + defer func() { + for _, task := range gcTasks { + delete(t.tasks, task.Key()) + } + }() + for _, task := range t.tasks { if t.gcFunc != nil { if t.gcFunc(task) { - t.delete(t.tasks[task.Key()]) + gcTasks = append(gcTasks, task) + continue } } if t.filterFunc != nil { - if t.filterFunc(task) { - return task + if !t.filterFunc(task) { + continue } - } else { - return task } + backupTasks = append(backupTasks, task) } - return nil + if len(backupTasks) == 0 { + return nil + } + sort.Slice(backupTasks, func(i, j int) bool { + return backupTasks[i].GetCreateTime() < backupTasks[j].GetCreateTime() + }) + index := sort.Search(len(backupTasks), func(i int) bool { return backupTasks[i].GetCreateTime() > t.current }) + if index == len(backupTasks) { + index = 0 + } + return backupTasks[index] } // SetFilterTaskStrategy sets the callback func to filter task for popping or topping. 
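Editor's note on the rewritten top() above: instead of a map keyed by create time (which silently overwrites tasks that share the same create-time second), pending tasks are collected, expired ones are garbage-collected, the survivors are sorted by create time, and sort.Search picks the first task created after the queue's current cursor, wrapping to index zero once the cursor passes the newest task. Below is a minimal standalone sketch of that selection step; the simplified task struct and the nextAfter helper are stand-ins for the real coretask types, and how the queue advances its cursor is not shown in this hunk.

    package main

    import (
    	"fmt"
    	"sort"
    )

    // task is a simplified stand-in for coretask.Task.
    type task struct {
    	key        string
    	createTime int64
    }

    // nextAfter returns the oldest task created strictly after the cursor,
    // wrapping to the oldest task overall when the cursor is past the newest one.
    func nextAfter(tasks []task, cursor int64) *task {
    	if len(tasks) == 0 {
    		return nil
    	}
    	sort.Slice(tasks, func(i, j int) bool {
    		return tasks[i].createTime < tasks[j].createTime
    	})
    	idx := sort.Search(len(tasks), func(i int) bool {
    		return tasks[i].createTime > cursor
    	})
    	if idx == len(tasks) {
    		idx = 0 // cursor is past the newest task, wrap around round-robin style
    	}
    	return &tasks[idx]
    }

    func main() {
    	tasks := []task{{"a", 10}, {"b", 20}, {"c", 30}}
    	fmt.Println(nextAfter(tasks, 15).key) // b: first task created after 15
    	fmt.Println(nextAfter(tasks, 30).key) // a: cursor past newest, wrap to oldest
    }
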
diff --git a/base/gfsptqueue/queue_limit.go b/base/gfsptqueue/queue_limit.go index e89f80fff..9e54c8b82 100644 --- a/base/gfsptqueue/queue_limit.go +++ b/base/gfsptqueue/queue_limit.go @@ -1,6 +1,7 @@ package gfsptqueue import ( + "sort" "sync" "time" @@ -16,10 +17,11 @@ var _ taskqueue.TQueueWithLimit = &GfSpTQueueWithLimit{} var _ taskqueue.TQueueOnStrategyWithLimit = &GfSpTQueueWithLimit{} type GfSpTQueueWithLimit struct { - name string - tasks map[coretask.TKey]coretask.Task - cap int - mux sync.RWMutex + name string + current int64 + tasks map[coretask.TKey]coretask.Task + cap int + mux sync.RWMutex gcFunc func(task2 coretask.Task) bool filterFunc func(task2 coretask.Task) bool @@ -155,29 +157,41 @@ func (t *GfSpTQueueWithLimit) has(key coretask.TKey) bool { } func (t *GfSpTQueueWithLimit) topByLimit(limit corercmgr.Limit) coretask.Task { - tasksCreateMap := make(map[int64]coretask.Task) - for _, task := range t.tasks { - tasksCreateMap[task.GetCreateTime()] = task + if len(t.tasks) == 0 { + return nil } - keys := maps.SortKeys(tasksCreateMap) - for _, key := range keys { - task := tasksCreateMap[key] + var backupTasks []coretask.Task + var gcTasks []coretask.Task + defer func() { + for _, task := range gcTasks { + delete(t.tasks, task.Key()) + } + }() + for _, task := range t.tasks { if t.gcFunc != nil { if t.gcFunc(task) { - t.delete(t.tasks[task.Key()]) + gcTasks = append(gcTasks, task) + continue } } if limit.NotLess(task.EstimateLimit()) { - if t.filterFunc != nil { - if t.filterFunc(task) { - return task - } - } else { - return task + if t.filterFunc != nil && !t.filterFunc(task) { + continue } + backupTasks = append(backupTasks, task) } } - return nil + if len(backupTasks) == 0 { + return nil + } + sort.Slice(backupTasks, func(i, j int) bool { + return backupTasks[i].GetCreateTime() < backupTasks[j].GetCreateTime() + }) + index := sort.Search(len(backupTasks), func(i int) bool { return backupTasks[i].GetCreateTime() > t.current }) + if index == len(backupTasks) { + index = 0 + } + return backupTasks[index] } // SetFilterTaskStrategy sets the callback func to filter task for popping or topping. diff --git a/base/gnfd/gnfd.go b/base/gnfd/gnfd.go index a46f60068..41e240c6d 100644 --- a/base/gnfd/gnfd.go +++ b/base/gnfd/gnfd.go @@ -23,8 +23,9 @@ const ( ) var ( - ErrNoSuchBucket = gfsperrors.Register(GreenFieldChain, http.StatusBadRequest, 500001, "no such bucket") - ErrSealTimeout = gfsperrors.Register(GreenFieldChain, http.StatusBadRequest, 500002, "seal failed") + ErrNoSuchBucket = gfsperrors.Register(GreenFieldChain, http.StatusBadRequest, 500001, "no such bucket") + ErrSealTimeout = gfsperrors.Register(GreenFieldChain, http.StatusBadRequest, 500002, "seal failed") + ErrRejectUnSealTimeout = gfsperrors.Register(GreenFieldChain, http.StatusBadRequest, 500003, "reject unseal failed") ) // GreenfieldClient the greenfield chain client, only use to query. diff --git a/base/gnfd/gnfd_service.go b/base/gnfd/gnfd_service.go index ca36dd9bf..a53f654da 100644 --- a/base/gnfd/gnfd_service.go +++ b/base/gnfd/gnfd_service.go @@ -4,6 +4,7 @@ import ( "context" "math" "strconv" + "strings" "time" "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" @@ -197,6 +198,29 @@ func (g *Gnfd) ListenObjectSeal(ctx context.Context, objectID uint64, timeoutHei return false, err } +// ListenRejectUnSealObject returns an indication of the object is rejected. 
+// TODO:: retrieve service support reject unseal event subscription +func (g *Gnfd) ListenRejectUnSealObject(ctx context.Context, objectID uint64, timeoutHeight int) (bool, error) { + startTime := time.Now() + defer metrics.GnfdChainHistogram.WithLabelValues("wait_reject_unseal_object").Observe(time.Since(startTime).Seconds()) + var err error + for i := 0; i < timeoutHeight; i++ { + _, err = g.QueryObjectInfoByID(ctx, strconv.FormatUint(objectID, 10)) + if err != nil { + if strings.Contains(err.Error(), "No such object") { + return true, nil + } + } + time.Sleep(ExpectedOutputBlockInternal * time.Second) + } + if err == nil { + log.CtxErrorw(ctx, "reject unseal object timeout", "object_id", objectID) + return false, ErrRejectUnSealTimeout + } + log.CtxErrorw(ctx, "failed to listen reject unseal object", "object_id", objectID, "error", err) + return false, err +} + // QueryPaymentStreamRecord returns the steam record info by account. func (g *Gnfd) QueryPaymentStreamRecord(ctx context.Context, account string) (*paymenttypes.StreamRecord, error) { startTime := time.Now() diff --git a/base/types/gfsptask/task.go b/base/types/gfsptask/task.go index 42f449af3..d29e553c3 100644 --- a/base/types/gfsptask/task.go +++ b/base/types/gfsptask/task.go @@ -44,9 +44,6 @@ func (m *GfSpTask) SetTimeout(timeout int64) { } func (m *GfSpTask) ExceedTimeout() bool { - if m.Retry == 0 { - return false - } return m.GetUpdateTime()+m.GetTimeout() < time.Now().Unix() } diff --git a/core/consensus/consensus.go b/core/consensus/consensus.go index 659bdedbd..017c1da46 100644 --- a/core/consensus/consensus.go +++ b/core/consensus/consensus.go @@ -40,6 +40,8 @@ type Consensus interface { VerifyPutObjectPermission(ctx context.Context, account, bucket, object string) (bool, error) // ListenObjectSeal returns an indicator whether the object is successfully sealed before timeOutHeight. ListenObjectSeal(ctx context.Context, objectID uint64, timeOutHeight int) (bool, error) + // ListenRejectUnSealObject returns an indication of the object is rejected. + ListenRejectUnSealObject(ctx context.Context, objectID uint64, timeoutHeight int) (bool, error) // Close the Consensus interface. 
Close() error } @@ -90,4 +92,7 @@ func (*NullConsensus) VerifyPutObjectPermission(context.Context, string, string, func (*NullConsensus) ListenObjectSeal(context.Context, uint64, int) (bool, error) { return false, nil } +func (*NullConsensus) ListenRejectUnSealObject(context.Context, uint64, int) (bool, error) { + return false, nil +} func (*NullConsensus) Close() error { return nil } diff --git a/modular/downloader/download_task.go b/modular/downloader/download_task.go index f0145c6e5..eb1e636da 100644 --- a/modular/downloader/download_task.go +++ b/modular/downloader/download_task.go @@ -253,7 +253,7 @@ func (d *DownloadModular) PreChallengePiece(ctx context.Context, downloadPieceTa log.CtxErrorw(ctx, "failed to pre challenge piece due to object unsealed") return ErrObjectUnsealed } - go d.baseApp.GfSpClient().ReportTask(ctx, downloadPieceTask) + d.baseApp.GfSpClient().ReportTask(ctx, downloadPieceTask) return nil } diff --git a/modular/executor/executor.go b/modular/executor/executor.go index 25e742bdf..2ef66fc38 100644 --- a/modular/executor/executor.go +++ b/modular/executor/executor.go @@ -3,6 +3,7 @@ package executor import ( "context" "fmt" + "math/rand" "sync/atomic" "time" @@ -66,8 +67,9 @@ func (e *ExecuteModular) eventLoop(ctx context.Context) { default: err := e.AskTask(ctx) if err != nil { - log.CtxErrorw(ctx, "failed to handle ask task, will hold on", "error", err) - time.Sleep(time.Duration(DefaultSleepInterval) * time.Millisecond) + rand.New(rand.NewSource(time.Now().Unix())) + sleep := rand.Intn(DefaultSleepInterval) + 1 + time.Sleep(time.Duration(sleep) * time.Millisecond) } } } diff --git a/modular/executor/executor_options.go b/modular/executor/executor_options.go index 7f696b718..9ac02de67 100644 --- a/modular/executor/executor_options.go +++ b/modular/executor/executor_options.go @@ -8,7 +8,7 @@ import ( const ( // DefaultExecutorMaxExecuteNum defines the default max parallel execute task number. - DefaultExecutorMaxExecuteNum int64 = 16 + DefaultExecutorMaxExecuteNum int64 = 64 // DefaultExecutorAskTaskInterval defines the default ask task interval from manager. 
DefaultExecutorAskTaskInterval int = 1 // DefaultExecutorAskReplicateApprovalTimeout defines the ask replicate piece approval diff --git a/modular/manager/manage_task.go b/modular/manager/manage_task.go index 82d3da0f1..fbf5930ca 100644 --- a/modular/manager/manage_task.go +++ b/modular/manager/manage_task.go @@ -42,7 +42,7 @@ func (m *ManageModular) DispatchTask(ctx context.Context, limit rcmgr.Limit) (ta task = m.sealQueue.TopByLimit(limit) if task != nil { log.CtxDebugw(ctx, "add seal object task to backup set", "task_key", task.Key().String(), - "task_limit", "task_limit", task.EstimateLimit().String()) + "task_limit", task.EstimateLimit().String()) backupTasks = append(backupTasks, task) } task = m.gcObjectQueue.TopByLimit(limit) diff --git a/modular/manager/manager.go b/modular/manager/manager.go index 2d0b73714..ab2c6e7e4 100644 --- a/modular/manager/manager.go +++ b/modular/manager/manager.go @@ -342,13 +342,15 @@ func (m *ManageModular) UploadingObjectNumber() int { func (m *ManageModular) GCUploadObjectQueue(qTask task.Task) bool { task := qTask.(task.UploadObjectTask) if task.Expired() { - if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ - ObjectID: task.GetObjectInfo().Id.Uint64(), - TaskState: types.TaskState_TASK_STATE_UPLOAD_OBJECT_ERROR, - ErrorDescription: "expired", - }); err != nil { - log.Errorw("failed to update task state", "task_key", task.Key().String(), "error", err) - } + go func() { + if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ + ObjectID: task.GetObjectInfo().Id.Uint64(), + TaskState: types.TaskState_TASK_STATE_UPLOAD_OBJECT_ERROR, + ErrorDescription: "expired", + }); err != nil { + log.Errorw("failed to update task state", "task_key", task.Key().String(), "error", err) + } + }() return true } return false @@ -357,13 +359,15 @@ func (m *ManageModular) GCUploadObjectQueue(qTask task.Task) bool { func (m *ManageModular) GCReplicatePieceQueue(qTask task.Task) bool { task := qTask.(task.ReplicatePieceTask) if task.Expired() { - if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ - ObjectID: task.GetObjectInfo().Id.Uint64(), - TaskState: types.TaskState_TASK_STATE_REPLICATE_OBJECT_ERROR, - ErrorDescription: "expired", - }); err != nil { - log.Errorw("failed to update task state", "task_key", task.Key().String(), "error", err) - } + go func() { + if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ + ObjectID: task.GetObjectInfo().Id.Uint64(), + TaskState: types.TaskState_TASK_STATE_REPLICATE_OBJECT_ERROR, + ErrorDescription: "expired", + }); err != nil { + log.Errorw("failed to update task state", "task_key", task.Key().String(), "error", err) + } + }() return true } return false @@ -372,13 +376,15 @@ func (m *ManageModular) GCReplicatePieceQueue(qTask task.Task) bool { func (m *ManageModular) GCSealObjectQueue(qTask task.Task) bool { task := qTask.(task.SealObjectTask) if task.Expired() { - if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ - ObjectID: task.GetObjectInfo().Id.Uint64(), - TaskState: types.TaskState_TASK_STATE_SEAL_OBJECT_ERROR, - ErrorDescription: "expired", - }); err != nil { - log.Errorw("failed to update task state", "task_key", task.Key().String(), "error", err) - } + go func() { + if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ + ObjectID: task.GetObjectInfo().Id.Uint64(), + TaskState: types.TaskState_TASK_STATE_SEAL_OBJECT_ERROR, + ErrorDescription: "expired", + }); err != nil { + log.Errorw("failed 
to update task state", "task_key", task.Key().String(), "error", err) + } + }() return true } return false @@ -407,7 +413,16 @@ func (m *ManageModular) FilterGCTask(qTask task.Task) bool { } func (m *ManageModular) FilterUploadingTask(qTask task.Task) bool { - return !qTask.Expired() && (qTask.GetRetry() == 0 || qTask.ExceedTimeout()) + if qTask.ExceedRetry() { + return false + } + if qTask.ExceedTimeout() { + return true + } + if qTask.GetRetry() == 0 { + return true + } + return false } func (m *ManageModular) PickUpTask(ctx context.Context, tasks []task.Task) task.Task { @@ -472,6 +487,15 @@ func (m *ManageModular) RejectUnSealObject(ctx context.Context, object *storaget time.Sleep(RejectUnSealObjectTimeout * time.Second) } else { log.CtxDebugw(ctx, "succeed to reject unseal object") + reject, err := m.baseApp.Consensus().ListenRejectUnSealObject(ctx, object.Id.Uint64(), DefaultListenRejectUnSealTimeoutHeight) + if err != nil { + log.CtxErrorw(ctx, "failed to reject unseal object", "error", err) + continue + } + if !reject { + log.CtxErrorw(ctx, "failed to reject unseal object") + continue + } return nil } } diff --git a/modular/manager/manager_options.go b/modular/manager/manager_options.go index 685a5ab1b..374951ea6 100644 --- a/modular/manager/manager_options.go +++ b/modular/manager/manager_options.go @@ -53,6 +53,10 @@ const ( // DefaultStatisticsOutputInterval defines the default interval for output statistics info, // it is used to log and debug. DefaultStatisticsOutputInterval int = 60 + // DefaultListenRejectUnSealTimeoutHeight defines the default listen reject unseal object + // on greenfield timeout height, if after current block height + timeout height, the object + // is not rejected, it is judged failed to reject unseal object on greenfield. + DefaultListenRejectUnSealTimeoutHeight int = 10 // DefaultDiscontinueTimeInterval defines the default interval for starting discontinue // buckets task , used for test net. 
diff --git a/modular/signer/signer.go b/modular/signer/signer.go index 1c4e0ae47..ad772aa17 100644 --- a/modular/signer/signer.go +++ b/modular/signer/signer.go @@ -20,9 +20,10 @@ import ( var ( ErrSignMsg = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120001, "sign message with private key failed") - ErrSealObjectOnChain = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120002, "send sealObject msg failed") - ErrRejectUnSealObjectOnChain = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120003, "send rejectUnSealObject msg failed") - ErrDiscontinueBucketOnChain = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120004, "send discontinueBucket msg failed") + ErrSealObjectOnChain = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120002, "send seal object msg failed") + ErrRejectUnSealObjectOnChain = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120003, "send reject unseal object msg failed") + ErrDiscontinueBucketOnChain = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120004, "send discontinue bucket msg failed") + ErrDanglingPointer = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120005, "sign or tx msg pointer dangling") ) var _ module.Signer = &SignModular{} diff --git a/modular/signer/signer_client.go b/modular/signer/signer_client.go index d7b692759..653c33953 100644 --- a/modular/signer/signer_client.go +++ b/modular/signer/signer_client.go @@ -3,6 +3,7 @@ package signer import ( "context" "encoding/hex" + "fmt" "strings" "sync" @@ -35,6 +36,9 @@ const ( // SignGc is the type of signature signed by the gc account SignGc SignType = "gc" + + // BroadcastTxRetry defines the max retry for broadcasting tx on-chain + BroadcastTxRetry = 3 ) // GreenfieldChainSignClient the greenfield chain client @@ -164,9 +168,15 @@ func (client *GreenfieldChainSignClient) SealObject( scope SignType, sealObject *storagetypes.MsgSealObject) ( []byte, error) { + if sealObject == nil { + log.CtxErrorw(ctx, "seal object msg pointer dangling") + return nil, ErrDanglingPointer + } + ctx = log.WithValue(ctx, log.CtxKeyBucketName, sealObject.GetBucketName()) + ctx = log.WithValue(ctx, log.CtxKeyObjectName, sealObject.GetObjectName()) km, err := client.greenfieldClients[scope].GetKeyManager() if err != nil { - log.CtxErrorw(ctx, "failed to get private key", "err", err) + log.CtxErrorw(ctx, "failed to get private key", "error", err) return nil, ErrSignMsg } @@ -187,39 +197,53 @@ func (client *GreenfieldChainSignClient) SealObject( msgSealObject := storagetypes.NewMsgSealObject(km.GetAddr(), sealObject.BucketName, sealObject.ObjectName, secondarySPAccs, sealObject.SecondarySpSignatures) mode := tx.BroadcastMode_BROADCAST_MODE_ASYNC - txOpt := &ctypes.TxOption{ - Mode: &mode, - GasLimit: client.gasLimit, - Nonce: nonce, - } - resp, err := client.greenfieldClients[scope].BroadcastTx(ctx, []sdk.Msg{msgSealObject}, txOpt) - if err != nil { - log.CtxErrorw(ctx, "failed to broadcast tx", "err", err, "seal_info", msgSealObject.String()) - if strings.Contains(err.Error(), "account sequence mismatch") { - // if nonce mismatch, reset nonce by querying the nonce on chain - nonce, err = client.greenfieldClients[scope].GetNonce() - if err != nil { - log.CtxErrorw(ctx, "failed to get seal account nonce", "err", err, "seal_info", msgSealObject.String()) - return nil, ErrSealObjectOnChain + var ( + resp *tx.BroadcastTxResponse + txHash []byte + ) + for i := 0; 
i < BroadcastTxRetry; i++ { + txOpt := &ctypes.TxOption{ + Mode: &mode, + GasLimit: client.gasLimit, + Nonce: nonce, + } + resp, err = client.greenfieldClients[scope].BroadcastTx(ctx, []sdk.Msg{msgSealObject}, txOpt) + if err != nil { + log.CtxErrorw(ctx, "failed to broadcast seal object tx", "error", err) + if strings.Contains(err.Error(), "account sequence mismatch") { + // if nonce mismatch, reset nonce by querying the nonce on chain + nonce, err = client.greenfieldClients[scope].GetNonce() + if err != nil { + log.CtxErrorw(ctx, "failed to get seal account nonce", "error", err) + ErrSealObjectOnChain.SetError(fmt.Errorf("failed to get seal account nonce, error: %v", err)) + return nil, ErrSealObjectOnChain + } + client.sealAccNonce = nonce } - client.sealAccNonce = nonce + continue } - return nil, ErrSealObjectOnChain - } + client.sealAccNonce = nonce + 1 - if resp.TxResponse.Code != 0 { - log.CtxErrorf(ctx, "failed to broadcast tx, resp code: %d", resp.TxResponse.Code, "seal_info", msgSealObject.String()) - return nil, ErrSealObjectOnChain - } - txHash, err := hex.DecodeString(resp.TxResponse.TxHash) - if err != nil { - log.CtxErrorw(ctx, "failed to marshal tx hash", "err", err, "seal_info", msgSealObject.String()) - return nil, ErrSealObjectOnChain + if resp.TxResponse.Code != 0 { + log.CtxErrorf(ctx, "failed to broadcast tx, resp code: %d", resp.TxResponse.Code) + ErrSealObjectOnChain.SetError(fmt.Errorf("failed to broadcast seal object tx, resp_code: %d", resp.TxResponse.Code)) + err = ErrSealObjectOnChain + continue + } + txHash, err = hex.DecodeString(resp.TxResponse.TxHash) + if err != nil { + log.CtxErrorw(ctx, "failed to marshal tx hash", "error", err) + ErrSealObjectOnChain.SetError(fmt.Errorf("failed to decode seal object tx hash, error: %v", err)) + err = ErrSealObjectOnChain + continue + } + if err == nil { + log.CtxDebugw(ctx, "succeed to broadcast seal object tx", "tx_hash", txHash) + return txHash, nil + } } - client.sealAccNonce = nonce + 1 - - return txHash, nil + return nil, err } // RejectUnSealObject reject seal object on the greenfield chain. 
@@ -228,9 +252,15 @@ func (client *GreenfieldChainSignClient) RejectUnSealObject( scope SignType, rejectObject *storagetypes.MsgRejectSealObject) ( []byte, error) { + if rejectObject == nil { + log.CtxErrorw(ctx, "reject unseal object msg pointer dangling") + return nil, ErrDanglingPointer + } + ctx = log.WithValue(ctx, log.CtxKeyBucketName, rejectObject.GetBucketName()) + ctx = log.WithValue(ctx, log.CtxKeyObjectName, rejectObject.GetObjectName()) km, err := client.greenfieldClients[scope].GetKeyManager() if err != nil { - log.CtxErrorw(ctx, "failed to get private key", "err", err) + log.CtxErrorw(ctx, "failed to get private key", "error", err) return nil, ErrSignMsg } @@ -240,39 +270,54 @@ func (client *GreenfieldChainSignClient) RejectUnSealObject( msgRejectUnSealObject := storagetypes.NewMsgRejectUnsealedObject(km.GetAddr(), rejectObject.GetBucketName(), rejectObject.GetObjectName()) mode := tx.BroadcastMode_BROADCAST_MODE_ASYNC - txOpt := &ctypes.TxOption{ - Mode: &mode, - GasLimit: client.gasLimit, - Nonce: nonce, - } - resp, err := client.greenfieldClients[scope].BroadcastTx(ctx, []sdk.Msg{msgRejectUnSealObject}, txOpt) - if err != nil { - log.CtxErrorw(ctx, "failed to broadcast tx", "err", err, "seal_info", msgRejectUnSealObject.String()) - if strings.Contains(err.Error(), "account sequence mismatch") { - // if nonce mismatch, reset nonce by querying the nonce on chain - nonce, err = client.greenfieldClients[scope].GetNonce() - if err != nil { - log.CtxErrorw(ctx, "failed to get seal account nonce", "err", err, "seal_info", msgRejectUnSealObject.String()) - return nil, ErrRejectUnSealObjectOnChain + var ( + resp *tx.BroadcastTxResponse + txHash []byte + ) + for i := 0; i < BroadcastTxRetry; i++ { + txOpt := &ctypes.TxOption{ + Mode: &mode, + GasLimit: client.gasLimit, + Nonce: nonce, + } + resp, err = client.greenfieldClients[scope].BroadcastTx(ctx, []sdk.Msg{msgRejectUnSealObject}, txOpt) + if err != nil { + log.CtxErrorw(ctx, "failed to broadcast reject unseal object tx", "error", err) + if strings.Contains(err.Error(), "account sequence mismatch") { + // if nonce mismatch, reset nonce by querying the nonce on chain + nonce, err = client.greenfieldClients[scope].GetNonce() + if err != nil { + log.CtxErrorw(ctx, "failed to get seal account nonce", "error", err) + ErrRejectUnSealObjectOnChain.SetError(fmt.Errorf("failed to get seal account nonce, error: %v", err)) + return nil, ErrRejectUnSealObjectOnChain + } + client.sealAccNonce = nonce } - client.sealAccNonce = nonce + continue } - return nil, ErrRejectUnSealObjectOnChain - } + client.sealAccNonce = nonce + 1 - if resp.TxResponse.Code != 0 { - log.CtxErrorf(ctx, "failed to broadcast tx, resp code: %d", resp.TxResponse.Code, "seal_info", msgRejectUnSealObject.String()) - return nil, ErrSealObjectOnChain - } - txHash, err := hex.DecodeString(resp.TxResponse.TxHash) - if err != nil { - log.CtxErrorw(ctx, "failed to marshal tx hash", "err", err, "seal_info", msgRejectUnSealObject.String()) - return nil, ErrSealObjectOnChain - } - client.sealAccNonce = nonce + 1 + if resp.TxResponse.Code != 0 { + log.CtxErrorf(ctx, "failed to broadcast tx, resp code: %d", resp.TxResponse.Code) + ErrSealObjectOnChain.SetError(fmt.Errorf("failed to broadcast reject unseal object tx, resp_code: %d", resp.TxResponse.Code)) + err = ErrSealObjectOnChain + continue + } + txHash, err = hex.DecodeString(resp.TxResponse.TxHash) + if err != nil { + log.CtxErrorw(ctx, "failed to marshal tx hash", "error", err) + 
ErrRejectUnSealObjectOnChain.SetError(fmt.Errorf("failed to decode reject unseal object tx hash, error: %v", err)) + err = ErrRejectUnSealObjectOnChain + continue + } - return txHash, nil + if err == nil { + log.CtxDebugw(ctx, "succeed to broadcast reject unseal object tx", "tx_hash", txHash) + return txHash, nil + } + } + return nil, err } // DiscontinueBucket stops serving the bucket on the greenfield chain. From bcbbd0df1489e6a09800aa8a37843d8149d1c41e Mon Sep 17 00:00:00 2001 From: will-2012 <117156346+will-2012@users.noreply.github.com> Date: Sat, 17 Jun 2023 11:20:47 +0800 Subject: [PATCH 15/78] fix: modify the default gc opt to a suitable value (#613) * fix: modify the default gc opt to a suitable value * chore: add some gc logs --------- Co-authored-by: will-2012 --- modular/executor/execute_task.go | 4 ++++ modular/manager/manager_options.go | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/modular/executor/execute_task.go b/modular/executor/execute_task.go index 6d5f066c5..b9ee45d9a 100644 --- a/modular/executor/execute_task.go +++ b/modular/executor/execute_task.go @@ -221,9 +221,13 @@ func (e *ExecuteModular) HandleGCObjectTask(ctx context.Context, task coretask.G objectInfo := object.GetObjectInfo() currentGCObjectID = objectInfo.Id.Uint64() if currentGCBlockID < task.GetCurrentBlockNumber() { + log.Errorw("skip gc object", "object_info", objectInfo, + "task_current_gc_block_id", task.GetCurrentBlockNumber()) continue } if currentGCObjectID <= task.GetLastDeletedObjectId() { + log.Errorw("skip gc object", "object_info", objectInfo, + "task_last_deleted_object_id", task.GetLastDeletedObjectId()) continue } segmentCount := e.baseApp.PieceOp().SegmentPieceCount( diff --git a/modular/manager/manager_options.go b/modular/manager/manager_options.go index 374951ea6..730180ae5 100644 --- a/modular/manager/manager_options.go +++ b/modular/manager/manager_options.go @@ -40,10 +40,10 @@ const ( DefaultGlobalChallengePieceTaskCacheSize int = 4096 // DefaultGlobalBatchGcObjectTimeInterval defines the default interval for generating // gc object task. - DefaultGlobalBatchGcObjectTimeInterval int = 30 * 60 + DefaultGlobalBatchGcObjectTimeInterval int = 1 * 60 // DefaultGlobalGcObjectBlockInterval defines the default blocks number for getting // deleted objects. - DefaultGlobalGcObjectBlockInterval uint64 = 500 + DefaultGlobalGcObjectBlockInterval uint64 = 1000 // DefaultGlobalGcObjectSafeBlockDistance defines the default distance form current block // height to gc the deleted object. 
DefaultGlobalGcObjectSafeBlockDistance uint64 = 1000 From 13c27a580fa6d73d8752cc50b5c1359e9ca6cc11 Mon Sep 17 00:00:00 2001 From: will-2012 <117156346+will-2012@users.noreply.github.com> Date: Mon, 19 Jun 2023 14:08:26 +0800 Subject: [PATCH 16/78] fix: fix missing gc object (#621) --- modular/executor/execute_task.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/modular/executor/execute_task.go b/modular/executor/execute_task.go index b9ee45d9a..44fbe9dc6 100644 --- a/modular/executor/execute_task.go +++ b/modular/executor/execute_task.go @@ -225,11 +225,6 @@ func (e *ExecuteModular) HandleGCObjectTask(ctx context.Context, task coretask.G "task_current_gc_block_id", task.GetCurrentBlockNumber()) continue } - if currentGCObjectID <= task.GetLastDeletedObjectId() { - log.Errorw("skip gc object", "object_info", objectInfo, - "task_last_deleted_object_id", task.GetLastDeletedObjectId()) - continue - } segmentCount := e.baseApp.PieceOp().SegmentPieceCount( objectInfo.GetPayloadSize(), storageParams.VersionedParams.GetMaxSegmentSize()) for segIdx := uint32(0); segIdx < segmentCount; segIdx++ { From 1500f58962fc5371f7957d9af39243bbea20a976 Mon Sep 17 00:00:00 2001 From: VM <112189277+sysvm@users.noreply.github.com> Date: Mon, 19 Jun 2023 14:49:56 +0800 Subject: [PATCH 17/78] fix: rename authorizer to authenticator (#605) * fix: rename authorizer to authenticator * fix: fix signer var name * fix: rename authorization header error --------- Co-authored-by: VM --- base/errors.md | 2 +- base/gfspapp/app.go | 24 +++--- base/gfspapp/app_options.go | 14 ++-- ...izer_server.go => authenticator_server.go} | 19 +++-- .../gfspapp/{gfsp_mdmgr.go => gfsp_modmgr.go} | 0 base/gfspapp/grpc_server.go | 2 +- .../{authorizer.go => authenticator.go} | 30 +++---- base/gfspclient/client.go | 42 +++++----- base/gfspclient/gater.go | 4 +- base/gfspconfig/config.go | 18 ++-- cmd/storage_provider/main.go | 6 +- cmd/utils/init_env.go | 16 ++-- core/README.md | 2 +- core/module/README.md | 8 +- core/module/modular.go | 10 +-- core/module/module_const.go | 40 ++++----- core/module/null_modular.go | 4 +- .../authenticator.go} | 71 ++++++++-------- .../authenticator_options.go} | 8 +- .../off_chain_signer.go | 2 +- .../off_chain_signer_test.go | 2 +- modular/gater/admin_handler.go | 48 +++++------ modular/gater/bucket_handler.go | 20 ++--- modular/gater/errors.go | 4 +- modular/gater/object_handler.go | 82 +++++++++---------- modular/gater/request_context.go | 20 ++--- modular/signer/signer.go | 16 ++-- ...uthorization.proto => authenticator.proto} | 8 +- 28 files changed, 262 insertions(+), 260 deletions(-) rename base/gfspapp/{authorizer_server.go => authenticator_server.go} (76%) rename base/gfspapp/{gfsp_mdmgr.go => gfsp_modmgr.go} (100%) rename base/gfspclient/{authorizer.go => authenticator.go} (73%) rename modular/{authorizer/authorizer.go => authenticator/authenticator.go} (79%) rename modular/{authorizer/authorizer_options.go => authenticator/authenticator_options.go} (52%) rename modular/{authorizer => authenticator}/off_chain_signer.go (98%) rename modular/{authorizer => authenticator}/off_chain_signer_test.go (99%) rename proto/base/types/gfspserver/{authorization.proto => authenticator.proto} (94%) diff --git a/base/errors.md b/base/errors.md index 3090b3321..910a12224 100644 --- a/base/errors.md +++ b/base/errors.md @@ -31,7 +31,7 @@ It sorts from 50... ### Modular Code It sorts from 0... to 49 * `01`: is used for Approver modular code. -* `02`: is used for Authorizer modular code. 
+* `02`: is used for Authenticator modular code. * `03`: is used for Downloader modular code. * `04`: is used for TaskExecutor modular code. * `05`: is used for Gateway modular code. diff --git a/base/gfspapp/app.go b/base/gfspapp/app.go index 29bae3034..c7782b035 100644 --- a/base/gfspapp/app.go +++ b/base/gfspapp/app.go @@ -38,18 +38,18 @@ type GfSpBaseApp struct { rcmgr corercmgr.ResourceManager chain consensus.Consensus - approver module.Approver - authorizer module.Authorizer - downloader module.Downloader - executor module.TaskExecutor - gater module.Modular - manager module.Manager - p2p module.P2P - receiver module.Receiver - signer module.Signer - uploader module.Uploader - metrics module.Modular - pprof module.Modular + approver module.Approver + authenticator module.Authenticator + downloader module.Downloader + executor module.TaskExecutor + gater module.Modular + manager module.Manager + p2p module.P2P + receiver module.Receiver + signer module.Signer + uploader module.Uploader + metrics module.Modular + pprof module.Modular appCtx context.Context appCancel context.CancelFunc diff --git a/base/gfspapp/app_options.go b/base/gfspapp/app_options.go index 1f3ff751d..18404eb05 100644 --- a/base/gfspapp/app_options.go +++ b/base/gfspapp/app_options.go @@ -80,7 +80,7 @@ func DefaultStaticOption(app *GfSpBaseApp, cfg *gfspconfig.GfSpConfig) error { app.gcZombieRetry = cfg.Task.GcZombieTaskRetry app.gcMetaRetry = cfg.Task.GcMetaTaskRetry app.approver = &coremodule.NullModular{} - app.authorizer = &coremodule.NullModular{} + app.authenticator = &coremodule.NullModular{} app.downloader = &coremodule.NilModular{} app.executor = &coremodule.NilModular{} app.gater = &coremodule.NullModular{} @@ -122,8 +122,8 @@ func DefaultGfSpClientOption(app *GfSpBaseApp, cfg *gfspconfig.GfSpConfig) error if cfg.Endpoint.SignerEndpoint == "" { cfg.Endpoint.SignerEndpoint = cfg.GRPCAddress } - if cfg.Endpoint.AuthorizerEndpoint == "" { - cfg.Endpoint.AuthorizerEndpoint = cfg.GRPCAddress + if cfg.Endpoint.AuthenticatorEndpoint == "" { + cfg.Endpoint.AuthenticatorEndpoint = cfg.GRPCAddress } app.client = gfspclient.NewGfSpClient( cfg.Endpoint.ApproverEndpoint, @@ -134,7 +134,7 @@ func DefaultGfSpClientOption(app *GfSpBaseApp, cfg *gfspconfig.GfSpConfig) error cfg.Endpoint.UploaderEndpoint, cfg.Endpoint.P2PEndpoint, cfg.Endpoint.SignerEndpoint, - cfg.Endpoint.AuthorizerEndpoint, + cfg.Endpoint.AuthenticatorEndpoint, !cfg.Monitor.DisableMetrics) return nil } @@ -380,8 +380,8 @@ func DefaultGfSpModulusOption(app *GfSpBaseApp, cfg *gfspconfig.GfSpConfig) erro switch module.Name() { case coremodule.ApprovalModularName: app.approver = module.(coremodule.Approver) - case coremodule.AuthorizationModularName: - app.authorizer = module.(coremodule.Authorizer) + case coremodule.AuthenticationModularName: + app.authenticator = module.(coremodule.Authenticator) case coremodule.DownloadModularName: app.downloader = module.(coremodule.Downloader) case coremodule.ExecuteModularName: @@ -394,7 +394,7 @@ func DefaultGfSpModulusOption(app *GfSpBaseApp, cfg *gfspconfig.GfSpConfig) erro app.p2p = module.(coremodule.P2P) case coremodule.ReceiveModularName: app.receiver = module.(coremodule.Receiver) - case coremodule.SignerModularName: + case coremodule.SignModularName: app.signer = module.(coremodule.Signer) case coremodule.UploadModularName: app.uploader = module.(coremodule.Uploader) diff --git a/base/gfspapp/authorizer_server.go b/base/gfspapp/authenticator_server.go similarity index 76% rename from 
base/gfspapp/authorizer_server.go rename to base/gfspapp/authenticator_server.go index 97f728661..86a780597 100644 --- a/base/gfspapp/authorizer_server.go +++ b/base/gfspapp/authenticator_server.go @@ -11,19 +11,20 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" ) -var _ gfspserver.GfSpAuthorizationServiceServer = &GfSpBaseApp{} +var _ gfspserver.GfSpAuthenticationServiceServer = &GfSpBaseApp{} -func (g *GfSpBaseApp) GfSpVerifyAuthorize(ctx context.Context, req *gfspserver.GfSpAuthorizeRequest) (*gfspserver.GfSpAuthorizeResponse, error) { +func (g *GfSpBaseApp) GfSpVerifyAuthentication(ctx context.Context, req *gfspserver.GfSpAuthenticationRequest) ( + *gfspserver.GfSpAuthenticationResponse, error) { ctx = log.WithValue(ctx, log.CtxKeyBucketName, req.GetBucketName()) ctx = log.WithValue(ctx, log.CtxKeyObjectName, req.GetObjectName()) - log.CtxDebugw(ctx, "begin to authorize", "user", req.GetUserAccount(), "auth_type", req.GetAuthType()) + log.CtxDebugw(ctx, "begin to authenticate", "user", req.GetUserAccount(), "auth_type", req.GetAuthType()) startTime := time.Now() - allow, err := g.authorizer.VerifyAuthorize(ctx, coremodule.AuthOpType(req.GetAuthType()), + allow, err := g.authenticator.VerifyAuthentication(ctx, coremodule.AuthOpType(req.GetAuthType()), req.GetUserAccount(), req.GetBucketName(), req.GetObjectName()) metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_total_time").Observe(time.Since(startTime).Seconds()) - log.CtxDebugw(ctx, "finish to authorize", "user", req.GetUserAccount(), "auth_type", req.GetAuthType(), + log.CtxDebugw(ctx, "finish to authenticate", "user", req.GetUserAccount(), "auth_type", req.GetAuthType(), "allow", allow, "error", err) - return &gfspserver.GfSpAuthorizeResponse{ + return &gfspserver.GfSpAuthenticationResponse{ Err: gfsperrors.MakeGfSpError(err), Allowed: allow, }, nil @@ -32,7 +33,7 @@ func (g *GfSpBaseApp) GfSpVerifyAuthorize(ctx context.Context, req *gfspserver.G // GetAuthNonce get the auth nonce for which the Dapp or client can generate EDDSA key pairs. func (g *GfSpBaseApp) GetAuthNonce(ctx context.Context, req *gfspserver.GetAuthNonceRequest) (*gfspserver.GetAuthNonceResponse, error) { log.CtxDebugw(ctx, "begin to get auth nonce", "user", req.GetAccountId(), "domain", req.GetDomain()) - resp, err := g.authorizer.GetAuthNonce(ctx, req.AccountId, req.Domain) + resp, err := g.authenticator.GetAuthNonce(ctx, req.AccountId, req.Domain) log.CtxDebugw(ctx, "finish to get auth nonce", "user", req.GetAccountId(), "domain", req.GetDomain(), "error", err) if err != nil { return &gfspserver.GetAuthNonceResponse{ @@ -52,7 +53,7 @@ func (g *GfSpBaseApp) GetAuthNonce(ctx context.Context, req *gfspserver.GetAuthN // UpdateUserPublicKey updates the user public key once the Dapp or client generates the EDDSA key pairs. 
func (g *GfSpBaseApp) UpdateUserPublicKey(ctx context.Context, req *gfspserver.UpdateUserPublicKeyRequest) (*gfspserver.UpdateUserPublicKeyResponse, error) { log.CtxDebugw(ctx, "begin to update user public key", "user", req.GetAccountId(), "domain", req.GetDomain(), "public_key", req.UserPublicKey) - resp, err := g.authorizer.UpdateUserPublicKey(ctx, req.AccountId, req.Domain, req.CurrentNonce, req.Nonce, req.UserPublicKey, req.ExpiryDate) + resp, err := g.authenticator.UpdateUserPublicKey(ctx, req.AccountId, req.Domain, req.CurrentNonce, req.Nonce, req.UserPublicKey, req.ExpiryDate) log.CtxDebugw(ctx, "finish to update user public key", "user", req.GetAccountId(), "domain", req.GetDomain(), "error", err) return &gfspserver.UpdateUserPublicKeyResponse{ Err: gfsperrors.MakeGfSpError(err), @@ -63,7 +64,7 @@ func (g *GfSpBaseApp) UpdateUserPublicKey(ctx context.Context, req *gfspserver.U // VerifyOffChainSignature verifies the signature signed by user's EDDSA private key. func (g *GfSpBaseApp) VerifyOffChainSignature(ctx context.Context, req *gfspserver.VerifyOffChainSignatureRequest) (*gfspserver.VerifyOffChainSignatureResponse, error) { log.CtxDebugw(ctx, "begin to verify off-chain signature", "user", req.GetAccountId(), "domain", req.GetDomain(), "off_chain_sig", req.OffChainSig, "real_msg_to_sign", req.RealMsgToSign) - resp, err := g.authorizer.VerifyOffChainSignature(ctx, req.AccountId, req.Domain, req.OffChainSig, req.RealMsgToSign) + resp, err := g.authenticator.VerifyOffChainSignature(ctx, req.AccountId, req.Domain, req.OffChainSig, req.RealMsgToSign) log.CtxDebugw(ctx, "finish to verify off-chain signature", "user", req.GetAccountId(), "domain", req.GetDomain(), "error", err) return &gfspserver.VerifyOffChainSignatureResponse{ Err: gfsperrors.MakeGfSpError(err), diff --git a/base/gfspapp/gfsp_mdmgr.go b/base/gfspapp/gfsp_modmgr.go similarity index 100% rename from base/gfspapp/gfsp_mdmgr.go rename to base/gfspapp/gfsp_modmgr.go diff --git a/base/gfspapp/grpc_server.go b/base/gfspapp/grpc_server.go index c610377b5..9c30e478c 100644 --- a/base/gfspapp/grpc_server.go +++ b/base/gfspapp/grpc_server.go @@ -32,7 +32,7 @@ func (g *GfSpBaseApp) newRpcServer(options ...grpc.ServerOption) { } g.server = grpc.NewServer(options...) 
gfspserver.RegisterGfSpApprovalServiceServer(g.server, g) - gfspserver.RegisterGfSpAuthorizationServiceServer(g.server, g) + gfspserver.RegisterGfSpAuthenticationServiceServer(g.server, g) gfspserver.RegisterGfSpDownloadServiceServer(g.server, g) gfspserver.RegisterGfSpManageServiceServer(g.server, g) gfspserver.RegisterGfSpP2PServiceServer(g.server, g) diff --git a/base/gfspclient/authorizer.go b/base/gfspclient/authenticator.go similarity index 73% rename from base/gfspclient/authorizer.go rename to base/gfspclient/authenticator.go index 9714e7fbb..0f5d637aa 100644 --- a/base/gfspclient/authorizer.go +++ b/base/gfspclient/authenticator.go @@ -11,27 +11,27 @@ import ( "google.golang.org/grpc" ) -func (s *GfSpClient) VerifyAuthorize(ctx context.Context, auth coremodule.AuthOpType, account, bucket, object string) (bool, error) { +func (s *GfSpClient) VerifyAuthentication(ctx context.Context, auth coremodule.AuthOpType, account, bucket, object string) (bool, error) { startTime := time.Now() defer metrics.PerfAuthTimeHistogram.WithLabelValues("auth_client_total_time").Observe(time.Since(startTime).Seconds()) - conn, connErr := s.Connection(ctx, s.authorizerEndpoint) + conn, connErr := s.Connection(ctx, s.authenticatorEndpoint) metrics.PerfAuthTimeHistogram.WithLabelValues("auth_client_create_conn_time").Observe(time.Since(startTime).Seconds()) if connErr != nil { - log.CtxErrorw(ctx, "client failed to connect authorizer", "error", connErr) + log.CtxErrorw(ctx, "client failed to connect authenticator", "error", connErr) return false, ErrRpcUnknown } defer conn.Close() - req := &gfspserver.GfSpAuthorizeRequest{ + req := &gfspserver.GfSpAuthenticationRequest{ AuthType: int32(auth), UserAccount: account, BucketName: bucket, ObjectName: object, } startRequestTime := time.Now() - resp, err := gfspserver.NewGfSpAuthorizationServiceClient(conn).GfSpVerifyAuthorize(ctx, req) + resp, err := gfspserver.NewGfSpAuthenticationServiceClient(conn).GfSpVerifyAuthentication(ctx, req) metrics.PerfAuthTimeHistogram.WithLabelValues("auth_client_network_time").Observe(time.Since(startRequestTime).Seconds()) if err != nil { - log.CtxErrorw(ctx, "client failed to verify authorize", "error", err) + log.CtxErrorw(ctx, "client failed to verify authentication", "error", err) return false, ErrRpcUnknown } if resp.GetErr() != nil { @@ -42,9 +42,9 @@ func (s *GfSpClient) VerifyAuthorize(ctx context.Context, auth coremodule.AuthOp // GetAuthNonce get the auth nonce for which the Dapp or client can generate EDDSA key pairs. func (s *GfSpClient) GetAuthNonce(ctx context.Context, account string, domain string, opts ...grpc.CallOption) (currentNonce int32, nextNonce int32, currentPublicKey string, expiryDate int64, err error) { - conn, connErr := s.Connection(ctx, s.authorizerEndpoint) + conn, connErr := s.Connection(ctx, s.authenticatorEndpoint) if connErr != nil { - log.CtxErrorw(ctx, "client failed to connect authorizer", "error", connErr) + log.CtxErrorw(ctx, "client failed to connect authenticator", "error", connErr) return 0, 0, "", 0, ErrRpcUnknown } defer conn.Close() @@ -52,7 +52,7 @@ func (s *GfSpClient) GetAuthNonce(ctx context.Context, account string, domain st AccountId: account, Domain: domain, } - resp, err := gfspserver.NewGfSpAuthorizationServiceClient(conn).GetAuthNonce(ctx, req, opts...) + resp, err := gfspserver.NewGfSpAuthenticationServiceClient(conn).GetAuthNonce(ctx, req, opts...) 
ctx = log.Context(ctx, resp) if err != nil { log.CtxErrorw(ctx, "failed to get auth nonce rpc", "error", err) @@ -66,9 +66,9 @@ func (s *GfSpClient) GetAuthNonce(ctx context.Context, account string, domain st // UpdateUserPublicKey updates the user public key once the Dapp or client generates the EDDSA key pairs. func (s *GfSpClient) UpdateUserPublicKey(ctx context.Context, account string, domain string, currentNonce int32, nonce int32, userPublicKey string, expiryDate int64, opts ...grpc.CallOption) (bool, error) { - conn, connErr := s.Connection(ctx, s.authorizerEndpoint) + conn, connErr := s.Connection(ctx, s.authenticatorEndpoint) if connErr != nil { - log.CtxErrorw(ctx, "client failed to connect authorizer", "error", connErr) + log.CtxErrorw(ctx, "client failed to connect authenticator", "error", connErr) return false, ErrRpcUnknown } req := &gfspserver.UpdateUserPublicKeyRequest{ @@ -79,7 +79,7 @@ func (s *GfSpClient) UpdateUserPublicKey(ctx context.Context, account string, do UserPublicKey: userPublicKey, ExpiryDate: expiryDate, } - resp, err := gfspserver.NewGfSpAuthorizationServiceClient(conn).UpdateUserPublicKey(ctx, req, opts...) + resp, err := gfspserver.NewGfSpAuthenticationServiceClient(conn).UpdateUserPublicKey(ctx, req, opts...) ctx = log.Context(ctx, resp) if err != nil { log.CtxErrorw(ctx, "failed to update user public key rpc", "error", err) @@ -93,9 +93,9 @@ func (s *GfSpClient) UpdateUserPublicKey(ctx context.Context, account string, do // VerifyOffChainSignature verifies the signature signed by user's EDDSA private key. func (s *GfSpClient) VerifyOffChainSignature(ctx context.Context, account string, domain string, offChainSig string, realMsgToSign string, opts ...grpc.CallOption) (bool, error) { - conn, connErr := s.Connection(ctx, s.authorizerEndpoint) + conn, connErr := s.Connection(ctx, s.authenticatorEndpoint) if connErr != nil { - log.CtxErrorw(ctx, "client failed to connect authorizer", "error", connErr) + log.CtxErrorw(ctx, "client failed to connect authenticator", "error", connErr) return false, ErrRpcUnknown } req := &gfspserver.VerifyOffChainSignatureRequest{ @@ -104,7 +104,7 @@ func (s *GfSpClient) VerifyOffChainSignature(ctx context.Context, account string OffChainSig: offChainSig, RealMsgToSign: realMsgToSign, } - resp, err := gfspserver.NewGfSpAuthorizationServiceClient(conn).VerifyOffChainSignature(ctx, req, opts...) + resp, err := gfspserver.NewGfSpAuthenticationServiceClient(conn).VerifyOffChainSignature(ctx, req, opts...) 
ctx = log.Context(ctx, resp) if err != nil { log.CtxErrorw(ctx, "failed to verify off-chain signature rpc", "error", err) diff --git a/base/gfspclient/client.go b/base/gfspclient/client.go index eeda54286..bf0bf6a05 100644 --- a/base/gfspclient/client.go +++ b/base/gfspclient/client.go @@ -35,15 +35,15 @@ var ( ) type GfSpClient struct { - approverEndpoint string - managerEndpoint string - downloaderEndpoint string - receiverEndpoint string - metadataEndpoint string - uploaderEndpoint string - p2pEndpoint string - signerEndpoint string - authorizerEndpoint string + approverEndpoint string + managerEndpoint string + downloaderEndpoint string + receiverEndpoint string + metadataEndpoint string + uploaderEndpoint string + p2pEndpoint string + signerEndpoint string + authenticatorEndpoint string mux sync.RWMutex managerConn *grpc.ClientConn @@ -63,19 +63,19 @@ func NewGfSpClient( uploaderEndpoint string, p2pEndpoint string, signerEndpoint string, - authorizerEndpoint string, + authenticatorEndpoint string, metrics bool) *GfSpClient { return &GfSpClient{ - approverEndpoint: approverEndpoint, - managerEndpoint: managerEndpoint, - downloaderEndpoint: downloaderEndpoint, - receiverEndpoint: receiverEndpoint, - metadataEndpoint: metadataEndpoint, - uploaderEndpoint: uploaderEndpoint, - p2pEndpoint: p2pEndpoint, - signerEndpoint: signerEndpoint, - authorizerEndpoint: authorizerEndpoint, - metrics: metrics, + approverEndpoint: approverEndpoint, + managerEndpoint: managerEndpoint, + downloaderEndpoint: downloaderEndpoint, + receiverEndpoint: receiverEndpoint, + metadataEndpoint: metadataEndpoint, + uploaderEndpoint: uploaderEndpoint, + p2pEndpoint: p2pEndpoint, + signerEndpoint: signerEndpoint, + authenticatorEndpoint: authenticatorEndpoint, + metrics: metrics, } } @@ -156,7 +156,7 @@ func (s *GfSpClient) SignerConn(ctx context.Context, opts ...grpc.DialOption) (* return s.signerConn, nil } -func (s *GfSpClient) HttpClient(ctx context.Context) *http.Client { +func (s *GfSpClient) HTTPClient(ctx context.Context) *http.Client { s.mux.Lock() defer s.mux.Unlock() if s.httpClient == nil { diff --git a/base/gfspclient/gater.go b/base/gfspclient/gater.go index e158c546f..87e507df9 100644 --- a/base/gfspclient/gater.go +++ b/base/gfspclient/gater.go @@ -54,7 +54,7 @@ func (s *GfSpClient) ReplicatePieceToSecondary( receiveHeader := hex.EncodeToString(receiveMsg) req.Header.Add(GnfdReplicatePieceApprovalHeader, approvalHeader) req.Header.Add(GnfdReceiveMsgHeader, receiveHeader) - resp, err := s.HttpClient(ctx).Do(req) + resp, err := s.HTTPClient(ctx).Do(req) if err != nil { return err } @@ -87,7 +87,7 @@ func (s *GfSpClient) DoneReplicatePieceToSecondary(ctx context.Context, endpoint receiveHeader := hex.EncodeToString(receiveMsg) req.Header.Add(GnfdReplicatePieceApprovalHeader, approvalHeader) req.Header.Add(GnfdReceiveMsgHeader, receiveHeader) - resp, err := s.HttpClient(ctx).Do(req) + resp, err := s.HTTPClient(ctx).Do(req) if err != nil { return nil, nil, err } diff --git a/base/gfspconfig/config.go b/base/gfspconfig/config.go index b853f12b7..38819aeae 100644 --- a/base/gfspconfig/config.go +++ b/base/gfspconfig/config.go @@ -99,15 +99,15 @@ type SpAccountConfig struct { } type EndpointConfig struct { - ApproverEndpoint string - ManagerEndpoint string - DownloaderEndpoint string - ReceiverEndpoint string - MetadataEndpoint string - UploaderEndpoint string - P2PEndpoint string - SignerEndpoint string - AuthorizerEndpoint string + ApproverEndpoint string + ManagerEndpoint string + DownloaderEndpoint string + 
ReceiverEndpoint string + MetadataEndpoint string + UploaderEndpoint string + P2PEndpoint string + SignerEndpoint string + AuthenticatorEndpoint string } type ApprovalConfig struct { diff --git a/cmd/storage_provider/main.go b/cmd/storage_provider/main.go index 2fa5f02c8..63e37e5f3 100644 --- a/cmd/storage_provider/main.go +++ b/cmd/storage_provider/main.go @@ -12,7 +12,7 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/cmd/utils" "github.com/bnb-chain/greenfield-storage-provider/core/module" "github.com/bnb-chain/greenfield-storage-provider/modular/approver" - "github.com/bnb-chain/greenfield-storage-provider/modular/authorizer" + "github.com/bnb-chain/greenfield-storage-provider/modular/authenticator" "github.com/bnb-chain/greenfield-storage-provider/modular/blocksyncer" "github.com/bnb-chain/greenfield-storage-provider/modular/downloader" "github.com/bnb-chain/greenfield-storage-provider/modular/executor" @@ -33,14 +33,14 @@ import ( // New module func is help module manager to init the module instance. func registerModular() { gfspapp.RegisterModular(module.ApprovalModularName, module.ApprovalModularDescription, approver.NewApprovalModular) - gfspapp.RegisterModular(module.AuthorizationModularName, module.AuthorizationModularDescription, authorizer.NewAuthorizeModular) + gfspapp.RegisterModular(module.AuthenticationModularName, module.AuthenticationModularDescription, authenticator.NewAuthenticationModular) gfspapp.RegisterModular(module.DownloadModularName, module.DownloadModularDescription, downloader.NewDownloadModular) gfspapp.RegisterModular(module.ExecuteModularName, module.ExecuteModularDescription, executor.NewExecuteModular) gfspapp.RegisterModular(module.GateModularName, module.GateModularDescription, gater.NewGateModular) gfspapp.RegisterModular(module.ManageModularName, module.ManageModularDescription, manager.NewManageModular) gfspapp.RegisterModular(module.P2PModularName, module.P2PModularDescription, p2p.NewP2PModular) gfspapp.RegisterModular(module.ReceiveModularName, module.ReceiveModularDescription, receiver.NewReceiveModular) - gfspapp.RegisterModular(module.SignerModularName, module.SignerModularDescription, signer.NewSignModular) + gfspapp.RegisterModular(module.SignModularName, module.SignModularDescription, signer.NewSignModular) gfspapp.RegisterModular(metadata.MetadataModularName, metadata.MetadataModularDescription, metadata.NewMetadataModular) gfspapp.RegisterModular(module.UploadModularName, module.UploadModularDescription, uploader.NewUploadModular) gfspapp.RegisterModular(blocksyncer.BlockSyncerModularName, blocksyncer.BlockSyncerModularDescription, blocksyncer.NewBlockSyncerModular) diff --git a/cmd/utils/init_env.go b/cmd/utils/init_env.go index 8936f5070..26f693597 100644 --- a/cmd/utils/init_env.go +++ b/cmd/utils/init_env.go @@ -4,17 +4,17 @@ import ( "errors" "os" - "github.com/bnb-chain/greenfield-storage-provider/base/gfspapp" - "github.com/bnb-chain/greenfield-storage-provider/base/gfspclient" - "github.com/bnb-chain/greenfield-storage-provider/base/gnfd" - "github.com/bnb-chain/greenfield-storage-provider/core/spdb" - "github.com/bnb-chain/greenfield-storage-provider/store/sqldb" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/pelletier/go-toml/v2" "github.com/urfave/cli/v2" + "github.com/bnb-chain/greenfield-storage-provider/base/gfspapp" + "github.com/bnb-chain/greenfield-storage-provider/base/gfspclient" "github.com/bnb-chain/greenfield-storage-provider/base/gfspconfig" + 
"github.com/bnb-chain/greenfield-storage-provider/base/gnfd" + "github.com/bnb-chain/greenfield-storage-provider/core/spdb" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" + "github.com/bnb-chain/greenfield-storage-provider/store/sqldb" "github.com/bnb-chain/greenfield-storage-provider/util" ) @@ -123,8 +123,8 @@ func MakeGfSpClient(cfg *gfspconfig.GfSpConfig) *gfspclient.GfSpClient { if len(cfg.Endpoint.SignerEndpoint) == 0 { cfg.Endpoint.SignerEndpoint = cfg.GRPCAddress } - if len(cfg.Endpoint.AuthorizerEndpoint) == 0 { - cfg.Endpoint.AuthorizerEndpoint = cfg.GRPCAddress + if len(cfg.Endpoint.AuthenticatorEndpoint) == 0 { + cfg.Endpoint.AuthenticatorEndpoint = cfg.GRPCAddress } client := gfspclient.NewGfSpClient( cfg.Endpoint.ApproverEndpoint, @@ -135,7 +135,7 @@ func MakeGfSpClient(cfg *gfspconfig.GfSpConfig) *gfspclient.GfSpClient { cfg.Endpoint.UploaderEndpoint, cfg.Endpoint.P2PEndpoint, cfg.Endpoint.SignerEndpoint, - cfg.Endpoint.AuthorizerEndpoint, + cfg.Endpoint.AuthenticatorEndpoint, false) return client } diff --git a/core/README.md b/core/README.md index 9c631045c..27d5b5110 100644 --- a/core/README.md +++ b/core/README.md @@ -45,7 +45,7 @@ Infrastructure interface includes: ### Special Modular * [Approver](./module/README.md) : Approver is the modular to handle ask approval request, handles CreateBucketApproval and CreateObjectApproval. -* [Authorizer](./module/README.md): Authorizer is the modular to authority verification. +* [Authenticator](./module/README.md): Authenticator is the modular to authentication verification. * [Downloader](./module/README.md): Downloader is the modular to handle get object request from user account, and get challenge info request from other components in the system. * [TaskExecutor](./module/README.md): TaskExecutor is the modular to handle background task, diff --git a/core/module/README.md b/core/module/README.md index dc3495377..79fa626be 100644 --- a/core/module/README.md +++ b/core/module/README.md @@ -19,14 +19,14 @@ So the Front Modular has three interfaces for each task type, `PreHandleXXXTask` ## Background Modular Background Modular handles the SP inner task, since it is internally generated, the correctness of the information can be guaranteed, so only -have one interface`HandleXXXTask`. Background Modular includes: `Authorizer`, +have one interface`HandleXXXTask`. Background Modular includes: `Authenticator`, `TaskExecutor`,`Manager`, `P2P`, `c` and `Signer`. # Modular Type The GfSp framework specifies the following modular: `Gater`, `Approver`, -`Authorizer`, `Uploader`, `Downloader`, `Manager`, `P2P`, `Receiver`, +`Authenticator`, `Uploader`, `Downloader`, `Manager`, `P2P`, `Receiver`, `Signer`and `Metadata`. The GfSp framework also supports extending more customized mudolar as needed. As long as it is registered in GfSp framework and executes the modular interface, it will be initialized and scheduled. @@ -41,8 +41,8 @@ modular file. Approver is the modular to handle ask approval request, handles CreateBucketApproval and CreateObjectApproval. -## Authorizer -Authorizer is the modular to authority verification. +## Authenticator +Authenticator is the modular to authentication verification. 
## Downloader Downloader is the modular to handle get object request from user account, diff --git a/core/module/modular.go b/core/module/modular.go index a0a095e82..48106001a 100644 --- a/core/module/modular.go +++ b/core/module/modular.go @@ -24,7 +24,7 @@ type Modular interface { ReleaseResource(ctx context.Context, scope rcmgr.ResourceScopeSpan) } -// AuthOpType defines the operator type used to authority verification. +// AuthOpType defines the operator type used to authentication verification. type AuthOpType int32 const ( @@ -48,11 +48,11 @@ const ( AuthOpTypeListBucketReadRecord ) -// Authorizer is the interface to authority verification modular. -type Authorizer interface { +// Authenticator is the interface to authentication verification modular. +type Authenticator interface { Modular - // VerifyAuthorize verifies the operator authority. - VerifyAuthorize(ctx context.Context, auth AuthOpType, account, bucket, object string) (bool, error) + // VerifyAuthentication verifies the operator authentication. + VerifyAuthentication(ctx context.Context, auth AuthOpType, account, bucket, object string) (bool, error) // GetAuthNonce get the auth nonce for which the Dapp or client can generate EDDSA key pairs. GetAuthNonce(ctx context.Context, account string, domain string) (*spdb.OffChainAuthKey, error) // UpdateUserPublicKey updates the user public key once the Dapp or client generates the EDDSA key pairs. diff --git a/core/module/module_const.go b/core/module/module_const.go index 945067258..4264a2724 100644 --- a/core/module/module_const.go +++ b/core/module/module_const.go @@ -5,24 +5,24 @@ import ( ) var ( - ApprovalModularName = strings.ToLower("Approval") - ApprovalModularDescription = "Handles the ask crate bucket/object and replicates piece approval request." - AuthorizationModularName = strings.ToLower("Authorizer") - AuthorizationModularDescription = "Checks authorizations." - DownloadModularName = strings.ToLower("Downloader") - DownloadModularDescription = "Downloads object and gets challenge info and statistical read traffic from the backend." - ExecuteModularName = strings.ToLower("TaskExecutor") - ExecuteModularDescription = "Executes background tasks." - GateModularName = strings.ToLower("Gateway") - GateModularDescription = "Receives the user request and routes to the responding service." - ManageModularName = strings.ToLower("Manager") - ManageModularDescription = "Manages SPs and schedules tasks." - P2PModularName = strings.ToLower("p2p") - P2PModularDescription = "Communicates between SPs on p2p protocol." - ReceiveModularName = strings.ToLower("Receiver") - ReceiveModularDescription = "Receives data pieces of an object from other storage provider and store." - SignerModularName = strings.ToLower("Signer") - SignerModularDescription = "Signs the transaction and broadcasts to chain." - UploadModularName = strings.ToLower("Uploader") - UploadModularDescription = "Uploads object payload to primary SP." + ApprovalModularName = strings.ToLower("Approval") + ApprovalModularDescription = "Handles the ask crate bucket/object and replicates piece approval request." + AuthenticationModularName = strings.ToLower("Authenticator") + AuthenticationModularDescription = "Checks authentication." + DownloadModularName = strings.ToLower("Downloader") + DownloadModularDescription = "Downloads object and gets challenge info and statistical read traffic from the backend." + ExecuteModularName = strings.ToLower("TaskExecutor") + ExecuteModularDescription = "Executes background tasks." 
+ GateModularName = strings.ToLower("Gateway") + GateModularDescription = "Receives the user request and routes to the responding service." + ManageModularName = strings.ToLower("Manager") + ManageModularDescription = "Manages SPs and schedules tasks." + P2PModularName = strings.ToLower("p2p") + P2PModularDescription = "Communicates between SPs on p2p protocol." + ReceiveModularName = strings.ToLower("Receiver") + ReceiveModularDescription = "Receives data pieces of an object from other storage provider and store." + SignModularName = strings.ToLower("Signer") + SignModularDescription = "Signs the transaction and broadcasts to chain." + UploadModularName = strings.ToLower("Uploader") + UploadModularDescription = "Uploads object payload to primary SP." ) diff --git a/core/module/null_modular.go b/core/module/null_modular.go index 2770d027f..3b7a4d584 100644 --- a/core/module/null_modular.go +++ b/core/module/null_modular.go @@ -20,7 +20,7 @@ var _ Modular = (*NullModular)(nil) var _ Approver = (*NullModular)(nil) var _ Uploader = (*NullModular)(nil) var _ Manager = (*NullModular)(nil) -var _ Authorizer = (*NullModular)(nil) +var _ Authenticator = (*NullModular)(nil) type NullModular struct{} @@ -96,7 +96,7 @@ func (*NullModular) HandleDownloadObjectTask(context.Context, task.DownloadObjec func (*NullModular) HandleChallengePieceTask(context.Context, task.ChallengePieceTask) error { return ErrNilModular } -func (*NullModular) VerifyAuthorize(context.Context, AuthOpType, string, string, string) (bool, error) { +func (*NullModular) VerifyAuthentication(context.Context, AuthOpType, string, string, string) (bool, error) { return false, ErrNilModular } diff --git a/modular/authorizer/authorizer.go b/modular/authenticator/authenticator.go similarity index 79% rename from modular/authorizer/authorizer.go rename to modular/authenticator/authenticator.go index 5667606d8..cfb3952f7 100644 --- a/modular/authorizer/authorizer.go +++ b/modular/authenticator/authenticator.go @@ -1,4 +1,4 @@ -package authorizer +package authenticator import ( "context" @@ -9,6 +9,9 @@ import ( "strings" "time" + paymenttypes "github.com/bnb-chain/greenfield/x/payment/types" + storagetypes "github.com/bnb-chain/greenfield/x/storage/types" + "github.com/bnb-chain/greenfield-storage-provider/base/gfspapp" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" "github.com/bnb-chain/greenfield-storage-provider/core/module" @@ -17,43 +20,41 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/core/spdb" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" - paymenttypes "github.com/bnb-chain/greenfield/x/payment/types" - storagetypes "github.com/bnb-chain/greenfield/x/storage/types" ) var ( - ErrUnsupportedAuthType = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20001, "unsupported auth op type") - ErrMismatchSp = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20002, "mismatched primary sp") - ErrNotCreatedState = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20003, "object has not been created state") - ErrNotSealedState = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20004, "object has not been sealed state") - ErrPaymentState = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20005, "payment account is not active") - ErrNoSuchAccount = gfsperrors.Register(module.AuthorizationModularName, 
http.StatusNotFound, 20006, "no such account") - ErrNoSuchBucket = gfsperrors.Register(module.AuthorizationModularName, http.StatusNotFound, 20007, "no such bucket") - ErrNoSuchObject = gfsperrors.Register(module.AuthorizationModularName, http.StatusNotFound, 20008, "no such object") - ErrRepeatedBucket = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20009, "repeated bucket") - ErrRepeatedObject = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20010, "repeated object") - ErrNoPermission = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20011, "no permission") + ErrUnsupportedAuthType = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20001, "unsupported auth op type") + ErrMismatchSp = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20002, "mismatched primary sp") + ErrNotCreatedState = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20003, "object has not been created state") + ErrNotSealedState = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20004, "object has not been sealed state") + ErrPaymentState = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20005, "payment account is not active") + ErrNoSuchAccount = gfsperrors.Register(module.AuthenticationModularName, http.StatusNotFound, 20006, "no such account") + ErrNoSuchBucket = gfsperrors.Register(module.AuthenticationModularName, http.StatusNotFound, 20007, "no such bucket") + ErrNoSuchObject = gfsperrors.Register(module.AuthenticationModularName, http.StatusNotFound, 20008, "no such object") + ErrRepeatedBucket = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20009, "repeated bucket") + ErrRepeatedObject = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20010, "repeated object") + ErrNoPermission = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20011, "no permission") - ErrBadSignature = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20012, "bad signature") - ErrSignedMsgFormat = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20013, "signed msg must be formatted as ${actionContent}_${expiredTimestamp}") - ErrExpiredTimestampFormat = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20014, "expiredTimestamp in signed msg must be a unix epoch time in milliseconds") - ErrPublicKeyExpired = gfsperrors.Register(module.AuthorizationModularName, http.StatusBadRequest, 20015, "user public key is expired") + ErrBadSignature = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20012, "bad signature") + ErrSignedMsgFormat = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20013, "signed msg must be formatted as ${actionContent}_${expiredTimestamp}") + ErrExpiredTimestampFormat = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20014, "expiredTimestamp in signed msg must be a unix epoch time in milliseconds") + ErrPublicKeyExpired = gfsperrors.Register(module.AuthenticationModularName, http.StatusBadRequest, 20015, "user public key is expired") - ErrConsensus = gfsperrors.Register(module.AuthorizationModularName, http.StatusInternalServerError, 25002, "server slipped away, try again later") + ErrConsensus = gfsperrors.Register(module.AuthenticationModularName, 
http.StatusInternalServerError, 25002, "server slipped away, try again later") ) -var _ module.Authorizer = &AuthorizeModular{} +var _ module.Authenticator = &AuthenticationModular{} -type AuthorizeModular struct { +type AuthenticationModular struct { baseApp *gfspapp.GfSpBaseApp scope rcmgr.ResourceScope } -func (a *AuthorizeModular) Name() string { - return module.AuthorizationModularName +func (a *AuthenticationModular) Name() string { + return module.AuthenticationModularName } -func (a *AuthorizeModular) Start(ctx context.Context) error { +func (a *AuthenticationModular) Start(ctx context.Context) error { scope, err := a.baseApp.ResourceManager().OpenService(a.Name()) if err != nil { return err @@ -62,12 +63,12 @@ func (a *AuthorizeModular) Start(ctx context.Context) error { return nil } -func (a *AuthorizeModular) Stop(ctx context.Context) error { +func (a *AuthenticationModular) Stop(ctx context.Context) error { a.scope.Release() return nil } -func (a *AuthorizeModular) ReserveResource( +func (a *AuthenticationModular) ReserveResource( ctx context.Context, state *rcmgr.ScopeStat) ( rcmgr.ResourceScopeSpan, @@ -83,7 +84,7 @@ func (a *AuthorizeModular) ReserveResource( return span, nil } -func (a *AuthorizeModular) ReleaseResource( +func (a *AuthenticationModular) ReleaseResource( ctx context.Context, span rcmgr.ResourceScopeSpan) { span.Done() @@ -94,7 +95,7 @@ const ( ) // GetAuthNonce get the auth nonce for which the Dapp or client can generate EDDSA key pairs. -func (a *AuthorizeModular) GetAuthNonce(ctx context.Context, account string, domain string) (*spdb.OffChainAuthKey, error) { +func (a *AuthenticationModular) GetAuthNonce(ctx context.Context, account string, domain string) (*spdb.OffChainAuthKey, error) { authKey, err := a.baseApp.GfSpDB().GetAuthKey(account, domain) if err != nil { log.CtxErrorw(ctx, "failed to GetAuthKey", "error", err) @@ -105,7 +106,7 @@ func (a *AuthorizeModular) GetAuthNonce(ctx context.Context, account string, dom } // UpdateUserPublicKey updates the user public key once the Dapp or client generates the EDDSA key pairs. -func (a *AuthorizeModular) UpdateUserPublicKey(ctx context.Context, account string, domain string, currentNonce int32, nonce int32, userPublicKey string, expiryDate int64) (bool, error) { +func (a *AuthenticationModular) UpdateUserPublicKey(ctx context.Context, account string, domain string, currentNonce int32, nonce int32, userPublicKey string, expiryDate int64) (bool, error) { err := a.baseApp.GfSpDB().UpdateAuthKey(account, domain, currentNonce, nonce, userPublicKey, time.UnixMilli(expiryDate)) if err != nil { log.CtxErrorw(ctx, "failed to updateUserPublicKey when saving key") @@ -116,7 +117,7 @@ func (a *AuthorizeModular) UpdateUserPublicKey(ctx context.Context, account stri } // VerifyOffChainSignature verifies the signature signed by user's EDDSA private key. -func (a *AuthorizeModular) VerifyOffChainSignature(ctx context.Context, account string, domain string, offChainSig string, realMsgToSign string) (bool, error) { +func (a *AuthenticationModular) VerifyOffChainSignature(ctx context.Context, account string, domain string, offChainSig string, realMsgToSign string) (bool, error) { signature, err := hex.DecodeString(offChainSig) if err != nil { return false, ErrBadSignature @@ -159,9 +160,9 @@ func (a *AuthorizeModular) VerifyOffChainSignature(ctx context.Context, account return true, nil } -// VerifyAuthorize verifies the account has the operation's permission. 
+// VerifyAuthentication verifies the account has the operation's permission. // TODO:: supports permission path verification and query -func (a *AuthorizeModular) VerifyAuthorize( +func (a *AuthenticationModular) VerifyAuthentication( ctx context.Context, authType coremodule.AuthOpType, account, bucket, object string) ( @@ -184,7 +185,7 @@ func (a *AuthorizeModular) VerifyAuthorize( bucketInfo, _ := a.baseApp.Consensus().QueryBucketInfo(ctx, bucket) metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_create_bucket_approval_query_bucket_time").Observe(time.Since(queryTime).Seconds()) if bucketInfo != nil { - log.CtxErrorw(ctx, "failed to verify authorize of asking create bucket "+ + log.CtxErrorw(ctx, "failed to verify authentication of asking create bucket "+ "approval, bucket repeated", "bucket", bucket) return false, ErrRepeatedBucket } @@ -194,12 +195,12 @@ func (a *AuthorizeModular) VerifyAuthorize( bucketInfo, objectInfo, _ := a.baseApp.Consensus().QueryBucketInfoAndObjectInfo(ctx, bucket, object) metrics.PerfAuthTimeHistogram.WithLabelValues("auth_server_create_object_approval_query_bucket_object_time").Observe(time.Since(queryTime).Seconds()) if bucketInfo == nil { - log.CtxErrorw(ctx, "failed to verify authorize of asking create object "+ + log.CtxErrorw(ctx, "failed to verify authentication of asking create object "+ "approval, no such bucket to ask create object approval", "bucket", bucket, "object", object) return false, ErrNoSuchBucket } if objectInfo != nil { - log.CtxErrorw(ctx, "failed to verify authorize of asking create object "+ + log.CtxErrorw(ctx, "failed to verify authentication of asking create object "+ "approval, object has been created", "bucket", bucket, "object", object) return false, ErrRepeatedObject } diff --git a/modular/authorizer/authorizer_options.go b/modular/authenticator/authenticator_options.go similarity index 52% rename from modular/authorizer/authorizer_options.go rename to modular/authenticator/authenticator_options.go index fbeaa6565..c4a647e1b 100644 --- a/modular/authorizer/authorizer_options.go +++ b/modular/authenticator/authenticator_options.go @@ -1,4 +1,4 @@ -package authorizer +package authenticator import ( "github.com/bnb-chain/greenfield-storage-provider/base/gfspapp" @@ -6,7 +6,7 @@ import ( coremodule "github.com/bnb-chain/greenfield-storage-provider/core/module" ) -func NewAuthorizeModular(app *gfspapp.GfSpBaseApp, cfg *gfspconfig.GfSpConfig) (coremodule.Modular, error) { - authorize := &AuthorizeModular{baseApp: app} - return authorize, nil +func NewAuthenticationModular(app *gfspapp.GfSpBaseApp, cfg *gfspconfig.GfSpConfig) (coremodule.Modular, error) { + auth := &AuthenticationModular{baseApp: app} + return auth, nil } diff --git a/modular/authorizer/off_chain_signer.go b/modular/authenticator/off_chain_signer.go similarity index 98% rename from modular/authorizer/off_chain_signer.go rename to modular/authenticator/off_chain_signer.go index afca8d2e5..dad5ea0db 100644 --- a/modular/authorizer/off_chain_signer.go +++ b/modular/authenticator/off_chain_signer.go @@ -1,4 +1,4 @@ -package authorizer +package authenticator import ( "encoding/hex" diff --git a/modular/authorizer/off_chain_signer_test.go b/modular/authenticator/off_chain_signer_test.go similarity index 99% rename from modular/authorizer/off_chain_signer_test.go rename to modular/authenticator/off_chain_signer_test.go index a4b7b32c9..c795418ea 100644 --- a/modular/authorizer/off_chain_signer_test.go +++ b/modular/authenticator/off_chain_signer_test.go @@ -1,4 +1,4 
@@ -package authorizer +package authenticator import ( "bytes" diff --git a/modular/gater/admin_handler.go b/modular/gater/admin_handler.go index 5137f5e80..d97a267e3 100644 --- a/modular/gater/admin_handler.go +++ b/modular/gater/admin_handler.go @@ -9,6 +9,7 @@ import ( "time" "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" + storagetypes "github.com/bnb-chain/greenfield/x/storage/types" sdktypes "github.com/cosmos/cosmos-sdk/types" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" @@ -18,7 +19,6 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/modular/p2p/p2pnode" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" "github.com/bnb-chain/greenfield-storage-provider/util" - storagetypes "github.com/bnb-chain/greenfield/x/storage/types" ) // getApprovalHandler handles the get create bucket/object approval request. @@ -33,7 +33,7 @@ func (g *GateModular) getApprovalHandler(w http.ResponseWriter, r *http.Request) approvalMsg []byte createBucketApproval = storagetypes.MsgCreateBucket{} createObjectApproval = storagetypes.MsgCreateObject{} - authorized bool + authenticated bool approved bool ) defer func() { @@ -76,17 +76,17 @@ func (g *GateModular) getApprovalHandler(w http.ResponseWriter, r *http.Request) err = ErrValidateMsg return } - if reqCtx.NeedVerifyAuthorizer() { - startVerifyAuthorize := time.Now() - authorized, err = g.baseApp.GfSpClient().VerifyAuthorize( + if reqCtx.NeedVerifyAuthentication() { + startVerifyAuthentication := time.Now() + authenticated, err = g.baseApp.GfSpClient().VerifyAuthentication( reqCtx.Context(), coremodule.AuthOpAskCreateBucketApproval, reqCtx.Account(), createBucketApproval.GetBucketName(), "") - metrics.PerfGetApprovalTimeHistogram.WithLabelValues("verify_authorize").Observe(time.Since(startVerifyAuthorize).Seconds()) + metrics.PerfGetApprovalTimeHistogram.WithLabelValues("verify_authorize").Observe(time.Since(startVerifyAuthentication).Seconds()) if err != nil { - log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) + log.CtxErrorw(reqCtx.Context(), "failed to verify authentication", "error", err) return } - if !authorized { + if !authenticated { log.CtxErrorw(reqCtx.Context(), "no permission to operate") err = ErrNoPermission return @@ -122,18 +122,18 @@ func (g *GateModular) getApprovalHandler(w http.ResponseWriter, r *http.Request) err = ErrValidateMsg return } - if reqCtx.NeedVerifyAuthorizer() { - startVerifyAuthorize := time.Now() - authorized, err = g.baseApp.GfSpClient().VerifyAuthorize( + if reqCtx.NeedVerifyAuthentication() { + startVerifyAuthentication := time.Now() + authenticated, err = g.baseApp.GfSpClient().VerifyAuthentication( reqCtx.Context(), coremodule.AuthOpAskCreateObjectApproval, reqCtx.Account(), createObjectApproval.GetBucketName(), createObjectApproval.GetObjectName()) - metrics.PerfGetApprovalTimeHistogram.WithLabelValues("verify_authorize").Observe(time.Since(startVerifyAuthorize).Seconds()) + metrics.PerfGetApprovalTimeHistogram.WithLabelValues("verify_authorize").Observe(time.Since(startVerifyAuthentication).Seconds()) if err != nil { - log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) + log.CtxErrorw(reqCtx.Context(), "failed to verify authentication", "error", err) return } - if !authorized { + if !authenticated { log.CtxErrorw(reqCtx.Context(), "no permission to operate") err = ErrNoPermission return @@ -169,12 +169,12 @@ func (g *GateModular) getApprovalHandler(w http.ResponseWriter, r *http.Request) // can verify 
the info whether are correct by comparing with the greenfield info. func (g *GateModular) getChallengeInfoHandler(w http.ResponseWriter, r *http.Request) { var ( - err error - reqCtx *RequestContext - authorized bool - integrity []byte - checksums [][]byte - data []byte + err error + reqCtx *RequestContext + authenticated bool + integrity []byte + checksums [][]byte + data []byte ) startTime := time.Now() defer func() { @@ -215,17 +215,17 @@ func (g *GateModular) getChallengeInfoHandler(w http.ResponseWriter, r *http.Req } return } - if reqCtx.NeedVerifyAuthorizer() { + if reqCtx.NeedVerifyAuthentication() { authTime := time.Now() - authorized, err = g.baseApp.GfSpClient().VerifyAuthorize(reqCtx.Context(), + authenticated, err = g.baseApp.GfSpClient().VerifyAuthentication(reqCtx.Context(), coremodule.AuthOpTypeGetChallengePieceInfo, reqCtx.Account(), objectInfo.GetBucketName(), objectInfo.GetObjectName()) metrics.PerfChallengeTimeHistogram.WithLabelValues("challenge_auth_time").Observe(time.Since(authTime).Seconds()) if err != nil { - log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) + log.CtxErrorw(reqCtx.Context(), "failed to verify authentication", "error", err) return } - if !authorized { + if !authenticated { log.CtxErrorw(reqCtx.Context(), "failed to get challenge info due to no permission") err = ErrNoPermission return diff --git a/modular/gater/bucket_handler.go b/modular/gater/bucket_handler.go index f345e2bab..1b74cd432 100644 --- a/modular/gater/bucket_handler.go +++ b/modular/gater/bucket_handler.go @@ -17,7 +17,7 @@ func (g *GateModular) getBucketReadQuotaHandler(w http.ResponseWriter, r *http.R var ( err error reqCtx *RequestContext - authorized bool + authenticated bool bucketInfo *storagetypes.BucketInfo charge, free, consume uint64 ) @@ -37,14 +37,14 @@ func (g *GateModular) getBucketReadQuotaHandler(w http.ResponseWriter, r *http.R if err != nil { return } - if reqCtx.NeedVerifyAuthorizer() { - authorized, err = g.baseApp.GfSpClient().VerifyAuthorize(reqCtx.Context(), + if reqCtx.NeedVerifyAuthentication() { + authenticated, err = g.baseApp.GfSpClient().VerifyAuthentication(reqCtx.Context(), coremodule.AuthOpTypeGetBucketQuota, reqCtx.Account(), reqCtx.bucketName, "") if err != nil { - log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) + log.CtxErrorw(reqCtx.Context(), "failed to verify authentication", "error", err) return } - if !authorized { + if !authenticated { log.CtxErrorw(reqCtx.Context(), "no permission to operate") err = ErrNoPermission return @@ -99,7 +99,7 @@ func (g *GateModular) listBucketReadRecordHandler(w http.ResponseWriter, r *http var ( err error reqCtx *RequestContext - authorized bool + authenticated bool startTimestampUs int64 endTimestampUs int64 maxRecordNum int64 @@ -122,14 +122,14 @@ func (g *GateModular) listBucketReadRecordHandler(w http.ResponseWriter, r *http if err != nil { return } - if reqCtx.NeedVerifyAuthorizer() { - authorized, err = g.baseApp.GfSpClient().VerifyAuthorize(reqCtx.Context(), + if reqCtx.NeedVerifyAuthentication() { + authenticated, err = g.baseApp.GfSpClient().VerifyAuthentication(reqCtx.Context(), coremodule.AuthOpTypeListBucketReadRecord, reqCtx.Account(), reqCtx.bucketName, "") if err != nil { - log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) + log.CtxErrorw(reqCtx.Context(), "failed to verify authentication", "error", err) return } - if !authorized { + if !authenticated { log.CtxErrorw(reqCtx.Context(), "no permission to operate") err = 
ErrNoPermission return diff --git a/modular/gater/errors.go b/modular/gater/errors.go index 31c39a8c1..9c1d2e637 100644 --- a/modular/gater/errors.go +++ b/modular/gater/errors.go @@ -12,7 +12,7 @@ import ( var ( ErrUnsupportedSignType = gfsperrors.Register(module.GateModularName, http.StatusBadRequest, 50001, "unsupported sign type") - ErrAuthorizationFormat = gfsperrors.Register(module.GateModularName, http.StatusBadRequest, 50002, "authorization format error") + ErrAuthorizationHeaderFormat = gfsperrors.Register(module.GateModularName, http.StatusBadRequest, 50002, "authorization header format error") ErrRequestConsistent = gfsperrors.Register(module.GateModularName, http.StatusBadRequest, 50003, "request is tampered") ErrNoPermission = gfsperrors.Register(module.GateModularName, http.StatusUnauthorized, 50004, "no permission") ErrDecodeMsg = gfsperrors.Register(module.GateModularName, http.StatusBadRequest, 50005, "gnfd msg encoding error") @@ -37,7 +37,7 @@ var ( "The expiry date is expected to be within "+strconv.Itoa(int(MaxExpiryAgeInSec))+" seconds and formatted in YYYY-DD-MM HH:MM:SS 'GMT'Z, e.g. 2023-04-20 16:34:12 GMT+08:00 . ") ErrInvalidExpiryDate = gfsperrors.Register(module.GateModularName, http.StatusBadRequest, 50024, "The expiry parameter is incorrect. "+ "The expiry date is expected to be within "+strconv.Itoa(int(MaxExpiryAgeInSec))+" seconds and formatted in YYYY-DD-MM HH:MM:SS 'GMT'Z, e.g. 2023-04-20 16:34:12 GMT+08:00 . ") - ErrNoSuchObject = gfsperrors.Register(module.AuthorizationModularName, http.StatusNotFound, 50025, "no such object") + ErrNoSuchObject = gfsperrors.Register(module.AuthenticationModularName, http.StatusNotFound, 50025, "no such object") ErrConsensus = gfsperrors.Register(module.GateModularName, http.StatusBadRequest, 55001, "server slipped away, try again later") diff --git a/modular/gater/object_handler.go b/modular/gater/object_handler.go index 276f3c487..05fa433cc 100644 --- a/modular/gater/object_handler.go +++ b/modular/gater/object_handler.go @@ -26,11 +26,11 @@ import ( // putObjectHandler handles the upload object request. 
func (g *GateModular) putObjectHandler(w http.ResponseWriter, r *http.Request) { var ( - err error - reqCtx *RequestContext - authorized bool - objectInfo *storagetypes.ObjectInfo - params *storagetypes.Params + err error + reqCtx *RequestContext + authenticated bool + objectInfo *storagetypes.ObjectInfo + params *storagetypes.Params ) uploadPrimaryStartTime := time.Now() @@ -51,16 +51,16 @@ func (g *GateModular) putObjectHandler(w http.ResponseWriter, r *http.Request) { if err != nil { return } - if reqCtx.NeedVerifyAuthorizer() { - startAuthirzerTime := time.Now() - authorized, err = g.baseApp.GfSpClient().VerifyAuthorize(reqCtx.Context(), + if reqCtx.NeedVerifyAuthentication() { + startAuthenticationTime := time.Now() + authenticated, err = g.baseApp.GfSpClient().VerifyAuthentication(reqCtx.Context(), coremodule.AuthOpTypePutObject, reqCtx.Account(), reqCtx.bucketName, reqCtx.objectName) - metrics.PerfUploadTimeHistogram.WithLabelValues("uploader_authorizer").Observe(time.Since(startAuthirzerTime).Seconds()) + metrics.PerfUploadTimeHistogram.WithLabelValues("uploader_authorizer").Observe(time.Since(startAuthenticationTime).Seconds()) if err != nil { - log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) + log.CtxErrorw(reqCtx.Context(), "failed to verify authentication", "error", err) return } - if !authorized { + if !authenticated { log.CtxErrorw(reqCtx.Context(), "no permission to operate") err = ErrNoPermission return @@ -134,16 +134,16 @@ func parseRange(rangeStr string) (bool, int64, int64) { // getObjectHandler handles the download object request. func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) { var ( - err error - reqCtxErr error - reqCtx *RequestContext - authorized bool - objectInfo *storagetypes.ObjectInfo - bucketInfo *storagetypes.BucketInfo - params *storagetypes.Params - lowOffset int64 - highOffset int64 - pieceInfos []*downloader.SegmentPieceInfo + err error + reqCtxErr error + reqCtx *RequestContext + authenticated bool + objectInfo *storagetypes.ObjectInfo + bucketInfo *storagetypes.BucketInfo + params *storagetypes.Params + lowOffset int64 + highOffset int64 + pieceInfos []*downloader.SegmentPieceInfo ) getObjectStartTime := time.Now() defer func() { @@ -161,30 +161,30 @@ func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) { reqCtx, reqCtxErr = NewRequestContext(r, g) // check the object permission whether allow public read. 
verifyObjectPermissionTime := time.Now() - if authorized, err = g.baseApp.Consensus().VerifyGetObjectPermission(reqCtx.Context(), sdk.AccAddress{}.String(), + if authenticated, err = g.baseApp.Consensus().VerifyGetObjectPermission(reqCtx.Context(), sdk.AccAddress{}.String(), reqCtx.bucketName, reqCtx.objectName); err != nil { - log.CtxErrorw(reqCtx.Context(), "failed to verify authorize for getting public object", "error", err) + log.CtxErrorw(reqCtx.Context(), "failed to verify authentication for getting public object", "error", err) err = ErrConsensus return } metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_verify_object_permission_time").Observe(time.Since(verifyObjectPermissionTime).Seconds()) - if !authorized { + if !authenticated { if reqCtxErr != nil { err = reqCtxErr log.CtxErrorw(reqCtx.Context(), "no permission to operate, object is not public", "error", err) return } - if reqCtx.NeedVerifyAuthorizer() { + if reqCtx.NeedVerifyAuthentication() { authTime := time.Now() - if authorized, err = g.baseApp.GfSpClient().VerifyAuthorize(reqCtx.Context(), + if authenticated, err = g.baseApp.GfSpClient().VerifyAuthentication(reqCtx.Context(), coremodule.AuthOpTypeGetObject, reqCtx.Account(), reqCtx.bucketName, reqCtx.objectName); err != nil { metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_auth_time").Observe(time.Since(authTime).Seconds()) - log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) + log.CtxErrorw(reqCtx.Context(), "failed to verify authentication", "error", err) return } metrics.PerfGetObjectTimeHistogram.WithLabelValues("get_object_auth_time").Observe(time.Since(authTime).Seconds()) - if !authorized { + if !authenticated { log.CtxErrorw(reqCtx.Context(), "no permission to operate") err = ErrNoPermission return @@ -279,11 +279,11 @@ func (g *GateModular) getObjectHandler(w http.ResponseWriter, r *http.Request) { // queryUploadProgressHandler handles the query uploaded object progress request. func (g *GateModular) queryUploadProgressHandler(w http.ResponseWriter, r *http.Request) { var ( - err error - reqCtx *RequestContext - authorized bool - objectInfo *storagetypes.ObjectInfo - taskState int32 + err error + reqCtx *RequestContext + authenticated bool + objectInfo *storagetypes.ObjectInfo + taskState int32 ) defer func() { reqCtx.Cancel() @@ -301,14 +301,14 @@ func (g *GateModular) queryUploadProgressHandler(w http.ResponseWriter, r *http. if err != nil { return } - if reqCtx.NeedVerifyAuthorizer() { - authorized, err = g.baseApp.GfSpClient().VerifyAuthorize(reqCtx.Context(), + if reqCtx.NeedVerifyAuthentication() { + authenticated, err = g.baseApp.GfSpClient().VerifyAuthentication(reqCtx.Context(), coremodule.AuthOpTypeGetUploadingState, reqCtx.Account(), reqCtx.bucketName, reqCtx.objectName) if err != nil { - log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) + log.CtxErrorw(reqCtx.Context(), "failed to verify authentication", "error", err) return } - if !authorized { + if !authenticated { log.CtxErrorw(reqCtx.Context(), "no permission to operate") err = ErrNoPermission return @@ -357,7 +357,7 @@ func (g *GateModular) getObjectByUniversalEndpointHandler(w http.ResponseWriter, var ( err error reqCtx *RequestContext - authorized bool + authenticated bool isRange bool rangeStart int64 rangeEnd int64 @@ -481,13 +481,13 @@ func (g *GateModular) getObjectByUniversalEndpointHandler(w http.ResponseWriter, reqCtx.account = accAddress.String() // 2. 
check permission - authorized, err = g.baseApp.GfSpClient().VerifyAuthorize(reqCtx.Context(), + authenticated, err = g.baseApp.GfSpClient().VerifyAuthentication(reqCtx.Context(), coremodule.AuthOpTypeGetObject, reqCtx.Account(), reqCtx.bucketName, reqCtx.objectName) if err != nil { - log.CtxErrorw(reqCtx.Context(), "failed to verify authorize", "error", err) + log.CtxErrorw(reqCtx.Context(), "failed to verify authentication", "error", err) return } - if !authorized { + if !authenticated { log.CtxErrorw(reqCtx.Context(), "no permission to operate") err = ErrNoPermission return diff --git a/modular/gater/request_context.go b/modular/gater/request_context.go index 19bf37272..67593c162 100644 --- a/modular/gater/request_context.go +++ b/modular/gater/request_context.go @@ -8,13 +8,13 @@ import ( "strings" "time" + commonhttp "github.com/bnb-chain/greenfield-common/go/http" "github.com/cosmos/cosmos-sdk/crypto/keys/eth/ethsecp256k1" sdk "github.com/cosmos/cosmos-sdk/types" ethcrypto "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/secp256k1" "github.com/gorilla/mux" - commonhttp "github.com/bnb-chain/greenfield-common/go/http" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" ) @@ -127,14 +127,14 @@ func (r *RequestContext) String() string { getRequestIP(r.request), time.Since(r.startTime), r.err) } -// NeedVerifyAuthorizer is temporary to Compatible SignatureV2 -func (r *RequestContext) NeedVerifyAuthorizer() bool { +// NeedVerifyAuthentication is a temporary method to stay compatible with SignatureV2 +func (r *RequestContext) NeedVerifyAuthentication() bool { requestSignature := r.request.Header.Get(GnfdAuthorizationHeader) v1SignaturePrefix := signaturePrefix(SignTypeV1, SignAlgorithm) return strings.HasPrefix(requestSignature, v1SignaturePrefix) } -// signaturePrefix return supported Authorization prefix +// signaturePrefix returns the supported Authentication prefix func signaturePrefix(version, algorithm string) string { return version + " " + algorithm + "," } @@ -175,12 +175,12 @@ func (r *RequestContext) verifySignatureV1(requestSignature string) (sdk.AccAddr requestSignature = strings.ReplaceAll(requestSignature, " ", "") signatureItems := strings.Split(requestSignature, ",") if len(signatureItems) < 2 { - return nil, ErrAuthorizationFormat + return nil, ErrAuthorizationHeaderFormat } for _, item := range signatureItems { pair := strings.Split(item, "=") if len(pair) != 2 { - return nil, ErrAuthorizationFormat + return nil, ErrAuthorizationHeaderFormat } switch pair[0] { case SignedMsg: @@ -190,7 +190,7 @@ func (r *RequestContext) verifySignatureV1(requestSignature string) (sdk.AccAddr return nil, err } default: - return nil, ErrAuthorizationFormat + return nil, ErrAuthorizationHeaderFormat } } @@ -266,12 +266,12 @@ func parseSignedMsgAndSigFromRequest(requestSignature string) (*string, *string, requestSignature = strings.ReplaceAll(requestSignature, "\\n", "\n") signatureItems := strings.Split(requestSignature, ",") if len(signatureItems) != 2 { - return nil, nil, ErrAuthorizationFormat + return nil, nil, ErrAuthorizationHeaderFormat } for _, item := range signatureItems { pair := strings.Split(item, "=") if len(pair) != 2 { - return nil, nil, ErrAuthorizationFormat + return nil, nil, ErrAuthorizationHeaderFormat } switch pair[0] { case SignedMsg: @@ -279,7 +279,7 @@ func parseSignedMsgAndSigFromRequest(requestSignature string) (*string, *string, case Signature: signature = pair[1] default: - return nil, nil, ErrAuthorizationFormat + return nil, nil,
ErrAuthorizationHeaderFormat } } diff --git a/modular/signer/signer.go b/modular/signer/signer.go index ad772aa17..eb3bc4259 100644 --- a/modular/signer/signer.go +++ b/modular/signer/signer.go @@ -6,8 +6,9 @@ import ( "time" sdkmath "cosmossdk.io/math" - "github.com/bnb-chain/greenfield-common/go/hash" + storagetypes "github.com/bnb-chain/greenfield/x/storage/types" + "github.com/bnb-chain/greenfield-storage-provider/base/gfspapp" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfspp2p" @@ -15,15 +16,14 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/core/rcmgr" "github.com/bnb-chain/greenfield-storage-provider/core/task" "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" - storagetypes "github.com/bnb-chain/greenfield/x/storage/types" ) var ( - ErrSignMsg = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120001, "sign message with private key failed") - ErrSealObjectOnChain = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120002, "send seal object msg failed") - ErrRejectUnSealObjectOnChain = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120003, "send reject unseal object msg failed") - ErrDiscontinueBucketOnChain = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120004, "send discontinue bucket msg failed") - ErrDanglingPointer = gfsperrors.Register(module.SignerModularName, http.StatusBadRequest, 120005, "sign or tx msg pointer dangling") + ErrSignMsg = gfsperrors.Register(module.SignModularName, http.StatusBadRequest, 120001, "sign message with private key failed") + ErrSealObjectOnChain = gfsperrors.Register(module.SignModularName, http.StatusBadRequest, 120002, "send seal object msg failed") + ErrRejectUnSealObjectOnChain = gfsperrors.Register(module.SignModularName, http.StatusBadRequest, 120003, "send reject unseal object msg failed") + ErrDiscontinueBucketOnChain = gfsperrors.Register(module.SignModularName, http.StatusBadRequest, 120004, "send discontinue bucket msg failed") + ErrDanglingPointer = gfsperrors.Register(module.SignModularName, http.StatusBadRequest, 120005, "sign or tx msg pointer dangling") ) var _ module.Signer = &SignModular{} @@ -34,7 +34,7 @@ type SignModular struct { } func (s *SignModular) Name() string { - return module.SignerModularName + return module.SignModularName } func (s *SignModular) Start(ctx context.Context) error { diff --git a/proto/base/types/gfspserver/authorization.proto b/proto/base/types/gfspserver/authenticator.proto similarity index 94% rename from proto/base/types/gfspserver/authorization.proto rename to proto/base/types/gfspserver/authenticator.proto index 4cd35225b..98c1223d9 100644 --- a/proto/base/types/gfspserver/authorization.proto +++ b/proto/base/types/gfspserver/authenticator.proto @@ -5,14 +5,14 @@ import "base/types/gfsperrors/error.proto"; option go_package = "github.com/bnb-chain/greenfield-storage-provider/base/types/gfspserver"; -message GfSpAuthorizeRequest { +message GfSpAuthenticationRequest { int32 auth_type = 1; string user_account = 2; string bucket_name = 3; string object_name = 4; } -message GfSpAuthorizeResponse { +message GfSpAuthenticationResponse { base.types.gfsperrors.GfSpError err = 1; bool allowed = 2; } @@ -80,8 +80,8 @@ message VerifyOffChainSignatureResponse { bool result = 2; } -service GfSpAuthorizationService { - rpc GfSpVerifyAuthorize(GfSpAuthorizeRequest) returns (GfSpAuthorizeResponse) {} +service 
GfSpAuthenticationService { + rpc GfSpVerifyAuthentication(GfSpAuthenticationRequest) returns (GfSpAuthenticationResponse) {} // GetAuthNonce get the auth nonce for which the Dapp or client can generate EDDSA key pairs. rpc GetAuthNonce(GetAuthNonceRequest) returns (GetAuthNonceResponse) {} // UpdateUserPublicKey updates the user public key once the Dapp or client generates the EDDSA key pairs. From 3132c3b1957ad3eb70c05ad45a4b9d1b617a5048 Mon Sep 17 00:00:00 2001 From: constwz <122766871+constwz@users.noreply.github.com> Date: Mon, 19 Jun 2023 16:02:00 +0800 Subject: [PATCH 18/78] fix:blocksyncer queue (#623) Co-authored-by: constwz --- modular/blocksyncer/blocksyncer_indexer.go | 16 ++++++++-------- modular/blocksyncer/blocksyncer_options.go | 15 ++++----------- 2 files changed, 12 insertions(+), 19 deletions(-) diff --git a/modular/blocksyncer/blocksyncer_indexer.go b/modular/blocksyncer/blocksyncer_indexer.go index a60e723a1..be74fdf31 100644 --- a/modular/blocksyncer/blocksyncer_indexer.go +++ b/modular/blocksyncer/blocksyncer_indexer.go @@ -25,12 +25,12 @@ import ( func NewIndexer(codec codec.Codec, proxy node.Node, db database.Database, modules []modules.Module, serviceName string) parser.Indexer { return &Impl{ - codec: codec, - Node: proxy, - DB: db, - Modules: modules, - ServiceName: serviceName, - ProcessedQueue: make(chan uint64, 5), + codec: codec, + Node: proxy, + DB: db, + Modules: modules, + ServiceName: serviceName, + ProcessedHeight: 0, } } @@ -42,7 +42,7 @@ type Impl struct { LatestBlockHeight atomic.Value CatchUpFlag atomic.Value - ProcessedQueue chan uint64 + ProcessedHeight uint64 ServiceName string } @@ -150,7 +150,7 @@ func (i *Impl) Process(height uint64) error { blockMap.Delete(heightKey) eventMap.Delete(heightKey) txMap.Delete(heightKey) - i.ProcessedQueue <- height + i.ProcessedHeight = height return nil } diff --git a/modular/blocksyncer/blocksyncer_options.go b/modular/blocksyncer/blocksyncer_options.go index e5b1b4d31..a6e6fc083 100644 --- a/modular/blocksyncer/blocksyncer_options.go +++ b/modular/blocksyncer/blocksyncer_options.go @@ -152,7 +152,7 @@ func (b *BlockSyncerModular) serve(ctx context.Context) { } } // Create a queue that will collect, aggregate, and export blocks and metadata - exportQueue := types.NewQueue(25) + exportQueue := types.NewQueue(100) // Create workers worker := parser.NewWorker(b.parserCtx, exportQueue, 0, config.Cfg.Parser.ConcurrentSync) @@ -180,8 +180,6 @@ func (b *BlockSyncerModular) serve(ctx context.Context) { go b.enqueueNewBlocks(ctx, exportQueue, lastDbBlockHeight+1) // Start each blocking worker in a go-routine where the worker consumes jobs - // off of the export queue. 
- Cast(b.parserCtx.Indexer).ProcessedQueue <- uint64(0) // init ProcessedQueue go worker.Start(ctx) } @@ -204,7 +202,6 @@ func (b *BlockSyncerModular) enqueueNewBlocks(context context.Context, exportQue // log.Debugw("enqueueing new block", "height", currHeight) exportQueue <- currHeight } - time.Sleep(config.GetAvgBlockTime()) } } } @@ -227,7 +224,7 @@ func (b *BlockSyncerModular) getLatestBlockHeight(ctx context.Context) { } Cast(b.parserCtx.Indexer).GetLatestBlockHeight().Store(latestBlockHeight) - time.Sleep(config.GetAvgBlockTime()) + time.Sleep(time.Second) } } } @@ -244,13 +241,9 @@ func (b *BlockSyncerModular) quickFetchBlockData(startHeight uint64) { Cast(b.parserCtx.Indexer).GetCatchUpFlag().Store(int64(count*cycle + startHeight - 1)) break } - processedHeight, ok := <-Cast(b.parserCtx.Indexer).ProcessedQueue - if !ok { - log.Warnf("ProcessedQueue is closed") - return - } - log.Infof("processedHeight:%d, will process height:%d", processedHeight, count*cycle+startHeight) + processedHeight := Cast(b.parserCtx.Indexer).ProcessedHeight if processedHeight != 0 && count*cycle+startHeight-processedHeight > MaxHeightGapFactor*count { + time.Sleep(time.Second) continue } b.fetchData(count, cycle, startHeight, latestBlockHeight) From 7caaba0b7af2b2da44f6ff1d5810b233a35a4871 Mon Sep 17 00:00:00 2001 From: joeycli Date: Mon, 19 Jun 2023 16:23:43 +0800 Subject: [PATCH 19/78] chore: delete reject unseal object logic (#628) --- base/gfspapp/task_options.go | 8 +- modular/executor/execute_task.go | 39 +++---- modular/executor/executor.go | 2 + modular/manager/manage_task.go | 182 ++++++++++++++++--------------- modular/signer/signer_client.go | 12 +- 5 files changed, 130 insertions(+), 113 deletions(-) diff --git a/base/gfspapp/task_options.go b/base/gfspapp/task_options.go index d3295fb5e..45495ddb4 100644 --- a/base/gfspapp/task_options.go +++ b/base/gfspapp/task_options.go @@ -20,9 +20,9 @@ const ( // MaxReplicateTime defines the max timeout to replicate object. MaxReplicateTime int64 = 500 // MinReceiveTime defines the min timeout to confirm the received piece whether is sealed on greenfield. - MinReceiveTime int64 = 5 + MinReceiveTime int64 = 10 // MaxReceiveTime defines the max timeout to confirm the received piece whether is sealed on greenfield. - MaxReceiveTime int64 = 10 + MaxReceiveTime int64 = 30 // MinSealObjectTime defines the min timeout to seal object to greenfield. MinSealObjectTime int64 = 2 // MaxSealObjectTime defines the max timeout to seal object to greenfield. @@ -51,9 +51,9 @@ const ( // MaxReplicateRetry defines the max retry number to replicate object. MaxReplicateRetry = 6 // MinReceiveConfirmRetry defines the min retry number to confirm received piece is sealed on greenfield. - MinReceiveConfirmRetry = 2 + MinReceiveConfirmRetry = 20 // MaxReceiveConfirmRetry defines the max retry number to confirm received piece is sealed on greenfield. - MaxReceiveConfirmRetry = 6 + MaxReceiveConfirmRetry = 60 // MinSealObjectRetry defines the min retry number to seal object. MinSealObjectRetry = 3 // MaxSealObjectRetry defines the max retry number to seal object. 
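As an aside on how bounds like the ones above are typically consumed: a minimal sketch of clamping a size-derived timeout into the [MinReceiveTime, MaxReceiveTime] window defined in the hunk above. The helper name and the throughput figure are illustrative assumptions, not part of this patch.

// receiveConfirmTimeout is a hypothetical helper, not SP code: it derives a
// confirm timeout from the payload size and clamps it into the tuned window.
func receiveConfirmTimeout(payloadSize uint64) int64 {
	// assumed throughput, used only for illustration
	const assumedBytesPerSecond = 20 * 1024 * 1024
	timeout := int64(payloadSize) / assumedBytesPerSecond
	if timeout < MinReceiveTime {
		return MinReceiveTime
	}
	if timeout > MaxReceiveTime {
		return MaxReceiveTime
	}
	return timeout
}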
diff --git a/modular/executor/execute_task.go b/modular/executor/execute_task.go index 44fbe9dc6..5403130d9 100644 --- a/modular/executor/execute_task.go +++ b/modular/executor/execute_task.go @@ -133,25 +133,26 @@ func (e *ExecuteModular) HandleReceivePieceTask(ctx context.Context, task coreta "expect", onChainObject.GetSecondarySpAddresses()[int(task.GetReplicateIdx())], "current", e.baseApp.OperatorAddress()) task.SetError(ErrSecondaryMismatch) - err = e.baseApp.GfSpDB().DeleteObjectIntegrity(task.GetObjectInfo().Id.Uint64()) - if err != nil { - log.CtxErrorw(ctx, "failed to delete integrity") - } - var pieceKey string - segmentCount := e.baseApp.PieceOp().SegmentPieceCount(onChainObject.GetPayloadSize(), - task.GetStorageParams().GetMaxPayloadSize()) - for i := uint32(0); i < segmentCount; i++ { - if task.GetObjectInfo().GetRedundancyType() == storagetypes.REDUNDANCY_EC_TYPE { - pieceKey = e.baseApp.PieceOp().ECPieceKey(onChainObject.Id.Uint64(), - i, task.GetReplicateIdx()) - } else { - pieceKey = e.baseApp.PieceOp().SegmentPieceKey(onChainObject.Id.Uint64(), i) - } - err = e.baseApp.PieceStore().DeletePiece(ctx, pieceKey) - if err != nil { - log.CtxErrorw(ctx, "failed to delete piece data", "piece_key", pieceKey) - } - } + // TODO:: gc zombie task will gc the zombie piece, it is a conservative plan + //err = e.baseApp.GfSpDB().DeleteObjectIntegrity(task.GetObjectInfo().Id.Uint64()) + //if err != nil { + // log.CtxErrorw(ctx, "failed to delete integrity") + //} + //var pieceKey string + //segmentCount := e.baseApp.PieceOp().SegmentPieceCount(onChainObject.GetPayloadSize(), + // task.GetStorageParams().GetMaxPayloadSize()) + //for i := uint32(0); i < segmentCount; i++ { + // if task.GetObjectInfo().GetRedundancyType() == storagetypes.REDUNDANCY_EC_TYPE { + // pieceKey = e.baseApp.PieceOp().ECPieceKey(onChainObject.Id.Uint64(), + // i, task.GetReplicateIdx()) + // } else { + // pieceKey = e.baseApp.PieceOp().SegmentPieceKey(onChainObject.Id.Uint64(), i) + // } + // err = e.baseApp.PieceStore().DeletePiece(ctx, pieceKey) + // if err != nil { + // log.CtxErrorw(ctx, "failed to delete piece data", "piece_key", pieceKey) + // } + //} return } log.CtxDebugw(ctx, "succeed to handle confirm receive piece task") diff --git a/modular/executor/executor.go b/modular/executor/executor.go index 2ef66fc38..e1f7c4929 100644 --- a/modular/executor/executor.go +++ b/modular/executor/executor.go @@ -136,6 +136,8 @@ func (e *ExecuteModular) AskTask(ctx context.Context) error { askTask.EstimateLimit().String(), "remaining", limit.String(), "error", err) return err } + metrics.RunningTaskNumberGauge.WithLabelValues("running_task_num").Set(float64(atomic.LoadInt64(&e.executingNum))) + metrics.MaxTaskNumberGauge.WithLabelValues("max_task_num").Set(float64(atomic.LoadInt64(&e.executingNum))) defer e.ReleaseResource(ctx, span) defer e.ReportTask(ctx, askTask) ctx = log.WithValue(ctx, log.CtxKeyTask, askTask.Key().String()) diff --git a/modular/manager/manage_task.go b/modular/manager/manage_task.go index fbf5930ca..9f9ba101e 100644 --- a/modular/manager/manage_task.go +++ b/modular/manager/manage_task.go @@ -117,27 +117,22 @@ func (m *ManageModular) HandleDoneUploadObjectTask(ctx context.Context, task tas return ErrRepeatedTask } if task.Error() != nil { - startUpdateSPDBTime := time.Now() - err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ - ObjectID: task.GetObjectInfo().Id.Uint64(), - TaskState: types.TaskState_TASK_STATE_UPLOAD_OBJECT_ERROR, - ErrorDescription: task.Error().Error(), - 
}) - metrics.PerfUploadTimeHistogram.WithLabelValues("report_upload_task_update_spdb"). - Observe(time.Since(startUpdateSPDBTime).Seconds()) - - if err != nil { - log.CtxErrorw(ctx, "failed to update object task state", "error", err) - return ErrGfSpDB - } - - startRejectUnSealTime := time.Now() - err = m.RejectUnSealObject(ctx, task.GetObjectInfo()) - metrics.PerfUploadTimeHistogram.WithLabelValues("report_upload_task_reject_unseal"). - Observe(time.Since(startRejectUnSealTime).Seconds()) - - log.CtxErrorw(ctx, "reports failed update object task and reject unseal object", - "task_info", task.Info(), "error", task.Error(), "reject_unseal_error", err) + go func() error { + startUpdateSPDBTime := time.Now() + err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ + ObjectID: task.GetObjectInfo().Id.Uint64(), + TaskState: types.TaskState_TASK_STATE_UPLOAD_OBJECT_ERROR, + ErrorDescription: task.Error().Error(), + }) + metrics.PerfUploadTimeHistogram.WithLabelValues("report_upload_task_update_spdb"). + Observe(time.Since(startUpdateSPDBTime).Seconds()) + if err != nil { + log.CtxErrorw(ctx, "failed to update object task state", "error", err) + return ErrGfSpDB + } + log.CtxErrorw(ctx, "reports failed update object task", "task_info", task.Info(), "error", task.Error()) + return nil + }() return nil } replicateTask := &gfsptask.GfSpReplicatePieceTask{} @@ -150,23 +145,25 @@ func (m *ManageModular) HandleDoneUploadObjectTask(ctx context.Context, task tas err := m.replicateQueue.Push(replicateTask) metrics.PerfUploadTimeHistogram.WithLabelValues("report_upload_task_push_replicate_queue"). Observe(time.Since(startPushReplicateQueueTime).Seconds()) - if err != nil { log.CtxErrorw(ctx, "failed to push replicate piece task to queue", "error", err) return err } - startUpdateSPDBTime := time.Now() - err = m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ - ObjectID: task.GetObjectInfo().Id.Uint64(), - TaskState: types.TaskState_TASK_STATE_REPLICATE_OBJECT_DOING, - }) - metrics.PerfUploadTimeHistogram.WithLabelValues("report_upload_task_update_spdb"). - Observe(time.Since(startUpdateSPDBTime).Seconds()) - if err != nil { - log.CtxErrorw(ctx, "failed to update object task state", "error", err) - return ErrGfSpDB - } - log.CtxDebugw(ctx, "succeed to done upload object and waiting for scheduling to replicate piece", "task_info", task.Info()) + go func() error { + startUpdateSPDBTime := time.Now() + err = m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ + ObjectID: task.GetObjectInfo().Id.Uint64(), + TaskState: types.TaskState_TASK_STATE_REPLICATE_OBJECT_DOING, + }) + metrics.PerfUploadTimeHistogram.WithLabelValues("report_upload_task_update_spdb"). + Observe(time.Since(startUpdateSPDBTime).Seconds()) + if err != nil { + log.CtxErrorw(ctx, "failed to update object task state", "error", err) + return ErrGfSpDB + } + log.CtxDebugw(ctx, "succeed to done upload object and waiting for scheduling to replicate piece", "task_info", task.Info()) + return nil + }() return nil } @@ -177,7 +174,8 @@ func (m *ManageModular) HandleReplicatePieceTask(ctx context.Context, task task. 
} if task.Error() != nil { log.CtxErrorw(ctx, "handler error replicate piece task", "task_info", task.Info(), "error", task.Error()) - return m.handleFailedReplicatePieceTask(ctx, task) + go m.handleFailedReplicatePieceTask(ctx, task) + return nil } m.replicateQueue.PopByKey(task.Key()) if m.TaskUploading(ctx, task) { @@ -185,17 +183,20 @@ func (m *ManageModular) HandleReplicatePieceTask(ctx context.Context, task task. return ErrRepeatedTask } if task.GetSealed() { - metrics.SealObjectSucceedCounter.WithLabelValues(m.Name()).Inc() - log.CtxDebugw(ctx, "replicate piece object task has combined seal object task") - if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ - ObjectID: task.GetObjectInfo().Id.Uint64(), - TaskState: types.TaskState_TASK_STATE_SEAL_OBJECT_DONE, - }); err != nil { - log.CtxErrorw(ctx, "failed to update object task state", "task_info", task.Info(), "error", err) - // succeed, ignore this error - // return ErrGfSpDB - } - // TODO: delete this upload db record? + go func() error { + metrics.SealObjectSucceedCounter.WithLabelValues(m.Name()).Inc() + log.CtxDebugw(ctx, "replicate piece object task has combined seal object task") + if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ + ObjectID: task.GetObjectInfo().Id.Uint64(), + TaskState: types.TaskState_TASK_STATE_SEAL_OBJECT_DONE, + }); err != nil { + log.CtxErrorw(ctx, "failed to update object task state", "task_info", task.Info(), "error", err) + // succeed, ignore this error + // return ErrGfSpDB + } + // TODO: delete this upload db record? + return nil + }() return nil } log.CtxDebugw(ctx, "replicate piece object task fails to combine seal object task", "task_info", task.Info()) @@ -206,19 +207,22 @@ func (m *ManageModular) HandleReplicatePieceTask(ctx context.Context, task task. 
err := m.sealQueue.Push(sealObject) if err != nil { log.CtxErrorw(ctx, "failed to push seal object task to queue", "task_info", task.Info(), "error", err) - return ErrExceedTask - } - if err = m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ - ObjectID: task.GetObjectInfo().Id.Uint64(), - TaskState: types.TaskState_TASK_STATE_SEAL_OBJECT_DOING, - SecondaryAddresses: task.GetSecondaryAddresses(), - SecondarySignatures: task.GetSecondarySignatures(), - ErrorDescription: "", - }); err != nil { - log.CtxErrorw(ctx, "failed to update object task state", "task_info", task.Info(), "error", err) - return ErrGfSpDB + return err } - log.CtxDebugw(ctx, "succeed to done replicate piece and waiting for scheduling to seal object", "task_info", task.Info()) + go func() error { + if err = m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ + ObjectID: task.GetObjectInfo().Id.Uint64(), + TaskState: types.TaskState_TASK_STATE_SEAL_OBJECT_DOING, + SecondaryAddresses: task.GetSecondaryAddresses(), + SecondarySignatures: task.GetSecondarySignatures(), + ErrorDescription: "", + }); err != nil { + log.CtxErrorw(ctx, "failed to update object task state", "task_info", task.Info(), "error", err) + return ErrGfSpDB + } + log.CtxDebugw(ctx, "succeed to done replicate piece and waiting for scheduling to seal object", "task_info", task.Info()) + return nil + }() return nil } @@ -246,9 +250,7 @@ func (m *ManageModular) handleFailedReplicatePieceTask(ctx context.Context, hand log.CtxErrorw(ctx, "failed to update object task state", "task_info", handleTask.Info(), "error", err) return ErrGfSpDB } - err := m.RejectUnSealObject(ctx, handleTask.GetObjectInfo()) - log.CtxWarnw(ctx, "delete expired replicate piece task and reject unseal object", - "task_info", handleTask.Info(), "reject_unseal_error", err) + log.CtxWarnw(ctx, "delete expired replicate piece task", "task_info", handleTask.Info()) } return nil } @@ -260,20 +262,24 @@ func (m *ManageModular) HandleSealObjectTask(ctx context.Context, task task.Seal } if task.Error() != nil { log.CtxErrorw(ctx, "handler error seal object task", "task_info", task.Info(), "error", task.Error()) - return m.handleFailedSealObjectTask(ctx, task) - } - metrics.SealObjectSucceedCounter.WithLabelValues(m.Name()).Inc() - m.sealQueue.PopByKey(task.Key()) - if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ - ObjectID: task.GetObjectInfo().Id.Uint64(), - TaskState: types.TaskState_TASK_STATE_SEAL_OBJECT_DONE, - }); err != nil { - log.CtxErrorw(ctx, "failed to update object task state", "task_info", task.Info(), "error", err) - // succeed, ignore this error - // return ErrGfSpDB - } - // TODO: delete this upload db record? - log.CtxDebugw(ctx, "succeed to seal object on chain", "task_info", task.Info()) + go m.handleFailedSealObjectTask(ctx, task) + return nil + } + go func() error { + metrics.SealObjectSucceedCounter.WithLabelValues(m.Name()).Inc() + m.sealQueue.PopByKey(task.Key()) + if err := m.baseApp.GfSpDB().UpdateUploadProgress(&spdb.UploadObjectMeta{ + ObjectID: task.GetObjectInfo().Id.Uint64(), + TaskState: types.TaskState_TASK_STATE_SEAL_OBJECT_DONE, + }); err != nil { + log.CtxErrorw(ctx, "failed to update object task state", "task_info", task.Info(), "error", err) + // succeed, ignore this error + // return ErrGfSpDB + } + // TODO: delete this upload db record? 
+ log.CtxDebugw(ctx, "succeed to seal object on chain", "task_info", task.Info()) + return nil + }() return nil } @@ -301,27 +307,28 @@ func (m *ManageModular) handleFailedSealObjectTask(ctx context.Context, handleTa }); err != nil { log.CtxErrorw(ctx, "failed to update object task state", "task_info", handleTask.Info(), "error", err) } - err := m.RejectUnSealObject(ctx, handleTask.GetObjectInfo()) - log.CtxWarnw(ctx, "delete expired seal object task and reject unseal object", - "task_info", handleTask.Info(), "reject_unseal_error", err) + log.CtxWarnw(ctx, "delete expired seal object task", "task_info", handleTask.Info()) } return nil } func (m *ManageModular) HandleReceivePieceTask(ctx context.Context, task task.ReceivePieceTask) error { if task.GetSealed() { - m.receiveQueue.PopByKey(task.Key()) + go m.receiveQueue.PopByKey(task.Key()) log.CtxDebugw(ctx, "succeed to confirm receive piece seal on chain") } else if task.Error() != nil { - return m.handleFailedReceivePieceTask(ctx, task) + go m.handleFailedReceivePieceTask(ctx, task) + return nil } else { - task.SetRetry(0) - task.SetMaxRetry(m.baseApp.TaskMaxRetry(task)) - task.SetTimeout(m.baseApp.TaskTimeout(task, 0)) - task.SetPriority(m.baseApp.TaskPriority(task)) - task.SetUpdateTime(time.Now().Unix()) - err := m.receiveQueue.Push(task) - log.CtxErrorw(ctx, "push receive task to queue", "error", err) + go func() { + task.SetRetry(0) + task.SetMaxRetry(m.baseApp.TaskMaxRetry(task)) + task.SetTimeout(m.baseApp.TaskTimeout(task, 0)) + task.SetPriority(m.baseApp.TaskPriority(task)) + task.SetUpdateTime(time.Now().Unix()) + err := m.receiveQueue.Push(task) + log.CtxErrorw(ctx, "push receive task to queue", "error", err) + }() } return nil } @@ -340,7 +347,6 @@ func (m *ManageModular) handleFailedReceivePieceTask(ctx context.Context, handle } else { log.CtxErrorw(ctx, "delete expired confirm receive piece task", "task_info", handleTask.Info()) // TODO: confirm it - } return nil } diff --git a/modular/signer/signer_client.go b/modular/signer/signer_client.go index 653c33953..cfa4b2e52 100644 --- a/modular/signer/signer_client.go +++ b/modular/signer/signer_client.go @@ -192,7 +192,6 @@ func (client *GreenfieldChainSignClient) SealObject( client.mu.Lock() defer client.mu.Unlock() - nonce := client.sealAccNonce msgSealObject := storagetypes.NewMsgSealObject(km.GetAddr(), sealObject.BucketName, sealObject.ObjectName, secondarySPAccs, sealObject.SecondarySpSignatures) @@ -201,8 +200,13 @@ func (client *GreenfieldChainSignClient) SealObject( var ( resp *tx.BroadcastTxResponse txHash []byte + nonce uint64 ) for i := 0; i < BroadcastTxRetry; i++ { + nonce, err = client.greenfieldClients[scope].GetNonce() + if err != nil { + nonce = client.sealAccNonce + } txOpt := &ctypes.TxOption{ Mode: &mode, GasLimit: client.gasLimit, @@ -266,7 +270,6 @@ func (client *GreenfieldChainSignClient) RejectUnSealObject( client.mu.Lock() defer client.mu.Unlock() - nonce := client.sealAccNonce msgRejectUnSealObject := storagetypes.NewMsgRejectUnsealedObject(km.GetAddr(), rejectObject.GetBucketName(), rejectObject.GetObjectName()) mode := tx.BroadcastMode_BROADCAST_MODE_ASYNC @@ -274,8 +277,13 @@ func (client *GreenfieldChainSignClient) RejectUnSealObject( var ( resp *tx.BroadcastTxResponse txHash []byte + nonce uint64 ) for i := 0; i < BroadcastTxRetry; i++ { + nonce, err = client.greenfieldClients[scope].GetNonce() + if err != nil { + nonce = client.sealAccNonce + } txOpt := &ctypes.TxOption{ Mode: &mode, GasLimit: client.gasLimit, From 
60b686c4a913f70b26d1d57577a64ce56e451017 Mon Sep 17 00:00:00 2001 From: joeycli Date: Tue, 20 Jun 2023 14:14:59 +0800 Subject: [PATCH 20/78] fix: signer nonce mismatch under fullnode (#632) --- modular/signer/signer_client.go | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/modular/signer/signer_client.go b/modular/signer/signer_client.go index cfa4b2e52..fadc4a0ac 100644 --- a/modular/signer/signer_client.go +++ b/modular/signer/signer_client.go @@ -202,11 +202,8 @@ func (client *GreenfieldChainSignClient) SealObject( txHash []byte nonce uint64 ) + nonce = client.sealAccNonce for i := 0; i < BroadcastTxRetry; i++ { - nonce, err = client.greenfieldClients[scope].GetNonce() - if err != nil { - nonce = client.sealAccNonce - } txOpt := &ctypes.TxOption{ Mode: &mode, GasLimit: client.gasLimit, @@ -227,8 +224,6 @@ func (client *GreenfieldChainSignClient) SealObject( } continue } - client.sealAccNonce = nonce + 1 - if resp.TxResponse.Code != 0 { log.CtxErrorf(ctx, "failed to broadcast tx, resp code: %d", resp.TxResponse.Code) ErrSealObjectOnChain.SetError(fmt.Errorf("failed to broadcast seal object tx, resp_code: %d", resp.TxResponse.Code)) @@ -243,6 +238,7 @@ func (client *GreenfieldChainSignClient) SealObject( continue } if err == nil { + client.sealAccNonce = nonce + 1 log.CtxDebugw(ctx, "succeed to broadcast seal object tx", "tx_hash", txHash) return txHash, nil } @@ -279,11 +275,8 @@ func (client *GreenfieldChainSignClient) RejectUnSealObject( txHash []byte nonce uint64 ) + nonce = client.sealAccNonce for i := 0; i < BroadcastTxRetry; i++ { - nonce, err = client.greenfieldClients[scope].GetNonce() - if err != nil { - nonce = client.sealAccNonce - } txOpt := &ctypes.TxOption{ Mode: &mode, GasLimit: client.gasLimit, @@ -304,7 +297,6 @@ func (client *GreenfieldChainSignClient) RejectUnSealObject( } continue } - client.sealAccNonce = nonce + 1 if resp.TxResponse.Code != 0 { log.CtxErrorf(ctx, "failed to broadcast tx, resp code: %d", resp.TxResponse.Code) @@ -321,6 +313,7 @@ func (client *GreenfieldChainSignClient) RejectUnSealObject( } if err == nil { + client.sealAccNonce = nonce + 1 log.CtxDebugw(ctx, "succeed to broadcast reject unseal object tx", "tx_hash", txHash) return txHash, nil } From 7a83153238181d042fb092ca122b6803d4bea964 Mon Sep 17 00:00:00 2001 From: VM <112189277+sysvm@users.noreply.github.com> Date: Tue, 20 Jun 2023 17:19:14 +0800 Subject: [PATCH 21/78] docs: refactor sp framework docs (#544) * docs: refactor sp docs * docs: add module docs * docs: add store docs * docs: rename doc files * docs: update sp config --------- Co-authored-by: VM --- base/gfspapp/gfsp_modmgr.go | 38 +- core/lifecycle/README.md | 33 +- core/lifecycle/lifecycle.go | 19 +- core/module/README.md | 155 +++-- core/module/modular.go | 206 +++--- core/module/null_modular.go | 3 +- core/piecestore/piecestore.go | 4 +- core/rcmgr/README.md | 72 +- core/rcmgr/rcmgr.go | 4 +- core/task/README.md | 191 +++--- core/task/const.go | 3 +- core/task/null_task.go | 3 +- core/task/task.go | 166 ++--- core/taskqueue/README.md | 40 +- docs/introduction/01-overview.md | 39 -- docs/introduction/overview.md | 77 +++ docs/modules/01-piece_store.md | 92 --- docs/modules/03-sp_db.md | 174 ----- docs/modules/04-bs_db.md | 198 ------ docs/modules/approver.md | 57 ++ docs/modules/authenticator.md | 207 ++++++ docs/modules/common/lifecycle_modular.md | 92 +++ docs/modules/common/proto.md | 446 ++++++++++++ docs/modules/common/task.md | 638 ++++++++++++++++++ docs/modules/downloader.md | 108 +++ 
docs/modules/gateway.md | 84 +++ docs/modules/manager.md | 60 ++ docs/modules/p2p.md | 57 ++ docs/modules/receiver.md | 45 ++ docs/modules/sginer.md | 101 +++ docs/modules/taskexecutor.md | 115 ++++ docs/modules/uploader.md | 42 ++ docs/readme.md | 40 +- docs/run-book/01-deployment.md | 115 ---- docs/run-book/02-localup.md | 74 -- docs/spconfig/config_template.toml | 179 +++++ docs/store/bs_db.md | 0 docs/store/piece_store.md | 116 ++++ .../02-redundancy.md => store/redundancy.md} | 0 docs/store/sp_db.md | 285 ++++++++ docs/workflow/01-get_approval.md | 88 --- docs/workflow/02-put_payload_data.md | 36 - docs/workflow/03-get_payload_data.md | 15 - docs/workflow/04-challenge_piece_data.md | 18 - docs/workflow/workflow.md | 138 ++++ store/piecestore/README.md | 5 +- 46 files changed, 3272 insertions(+), 1406 deletions(-) delete mode 100644 docs/introduction/01-overview.md create mode 100644 docs/introduction/overview.md delete mode 100644 docs/modules/01-piece_store.md delete mode 100644 docs/modules/03-sp_db.md delete mode 100644 docs/modules/04-bs_db.md create mode 100644 docs/modules/approver.md create mode 100644 docs/modules/authenticator.md create mode 100644 docs/modules/common/lifecycle_modular.md create mode 100644 docs/modules/common/proto.md create mode 100644 docs/modules/common/task.md create mode 100644 docs/modules/downloader.md create mode 100644 docs/modules/gateway.md create mode 100644 docs/modules/manager.md create mode 100644 docs/modules/p2p.md create mode 100644 docs/modules/receiver.md create mode 100644 docs/modules/sginer.md create mode 100644 docs/modules/taskexecutor.md create mode 100644 docs/modules/uploader.md delete mode 100644 docs/run-book/01-deployment.md delete mode 100644 docs/run-book/02-localup.md create mode 100644 docs/spconfig/config_template.toml create mode 100644 docs/store/bs_db.md create mode 100644 docs/store/piece_store.md rename docs/{modules/02-redundancy.md => store/redundancy.md} (100%) create mode 100644 docs/store/sp_db.md delete mode 100644 docs/workflow/01-get_approval.md delete mode 100644 docs/workflow/02-put_payload_data.md delete mode 100644 docs/workflow/03-get_payload_data.md delete mode 100644 docs/workflow/04-challenge_piece_data.md create mode 100644 docs/workflow/workflow.md diff --git a/base/gfspapp/gfsp_modmgr.go b/base/gfspapp/gfsp_modmgr.go index e9262e559..3f849b7aa 100644 --- a/base/gfspapp/gfsp_modmgr.go +++ b/base/gfspapp/gfsp_modmgr.go @@ -29,12 +29,12 @@ type ModularManager struct { mux sync.RWMutex } -var mdmgr *ModularManager +var modMgr *ModularManager var once sync.Once func init() { once.Do(func() { - mdmgr = &ModularManager{ + modMgr = &ModularManager{ descriptions: make(map[string]string), newModularFunc: make(map[string]NewModularFunc), } @@ -43,47 +43,47 @@ func init() { // RegisterModular registers the module info to the global ModularManager func RegisterModular(name string, description string, newFunc NewModularFunc) { - mdmgr.mux.Lock() - defer mdmgr.mux.Unlock() + modMgr.mux.Lock() + defer modMgr.mux.Unlock() if name == "" { log.Panic("modular name cannot be blank") } - if _, ok := mdmgr.newModularFunc[name]; ok { + if _, ok := modMgr.newModularFunc[name]; ok { log.Panicf("[%s] modular repeated", name) } - mdmgr.modulus = append(mdmgr.modulus, name) + modMgr.modulus = append(modMgr.modulus, name) if len(description) != 0 { - mdmgr.descriptions[name] = description + modMgr.descriptions[name] = description } - mdmgr.newModularFunc[name] = newFunc + modMgr.newModularFunc[name] = newFunc } // 
GetRegisterModulus returns the list registered modules. func GetRegisterModulus() []string { - mdmgr.mux.RLock() - defer mdmgr.mux.RUnlock() - return mdmgr.modulus + modMgr.mux.RLock() + defer modMgr.mux.RUnlock() + return modMgr.modulus } // GetRegisterModulusDescription returns the list registered modules' description. func GetRegisterModulusDescription() string { - mdmgr.mux.RLock() - defer mdmgr.mux.RUnlock() + modMgr.mux.RLock() + defer modMgr.mux.RUnlock() var descriptions string - names := maps.SortKeys(mdmgr.newModularFunc) + names := maps.SortKeys(modMgr.newModularFunc) for _, name := range names { descriptions = descriptions + fmt.Sprintf("%-"+strconv.Itoa(20)+"s %s\n", - name, mdmgr.descriptions[name]) + name, modMgr.descriptions[name]) } return descriptions } // GetNewModularFunc returns the list registered module's new instances func. func GetNewModularFunc(name string) NewModularFunc { - mdmgr.mux.RLock() - defer mdmgr.mux.RUnlock() - if _, ok := mdmgr.newModularFunc[name]; !ok { + modMgr.mux.RLock() + defer modMgr.mux.RUnlock() + if _, ok := modMgr.newModularFunc[name]; !ok { log.Panicf("not register [%s] modular info", name) } - return mdmgr.newModularFunc[name] + return modMgr.newModularFunc[name] } diff --git a/core/lifecycle/README.md b/core/lifecycle/README.md index d6dbb6cf6..596892283 100644 --- a/core/lifecycle/README.md +++ b/core/lifecycle/README.md @@ -1,38 +1,37 @@ -# ServiceLifecycle +# Lifecycle -ServiceLifecycle is the interface to the service life cycle management subsystem. -The ServiceLifecycle tracks the Service life cycle, listens to the signal of the -process for graceful exit. +Lifecycle interface manages the lifecycle of a service and tracks its state changes. It also listens for signals from +the process to ensure a graceful shutdown. -# Concept +## Concept -## Service +### Service Interface -Service is the interface for ServiceLifecycle to manage. The component that plans -to use ServiceLifecycle needs to implement the interface. +Service is an interface for Lifecycle to manage. The component that plans to use Lifecycle needs to implement this interface. ```go -// Service is the interface for ServiceLifecycle to manage. +// Service provides abstract methods to control the lifecycle of a service +// Every service must implement Service interface. type Service interface { - // Name defines the unique identifier of the service, which cannot be repeated - // globally. + // Name defines the unique identifier of a service, which cannot be repeated globally. Name() string - // Start the service, for resource application, start background coroutine and + // Start a service, for resource application, start background coroutine and // other startup operations. // - // The Start method should be used in non-block way, for example, a blocked + // Start method should be used in non-block way, for example, a blocked // listening socket should open a goroutine separately internally. Start(ctx context.Context) error - // Stop the service, close the goroutines inside the service, recycle resources, - // and ensure the graceful launch of the service. + // Stop a service, close the goroutines inside the service, recycle resources, + // and ensure the graceful shutdown of the service. Stop(ctx context.Context) error } ``` -# Example +### Example + ```go ctx := context.Background() svcLifecycle.RegisterServices(service...) 
// blocks the svcLifecycle for waiting signals to shut down the process svcLifecycle.Signals(syscall.SIGINT, syscall.SIGTERM ...).Init(ctx).StartServices(ctx).Wait(ctx) -``` \ No newline at end of file +``` diff --git a/core/lifecycle/lifecycle.go b/core/lifecycle/lifecycle.go index 5527db8da..d7093499e 100644 --- a/core/lifecycle/lifecycle.go +++ b/core/lifecycle/lifecycle.go @@ -6,6 +6,7 @@ import ( ) // Service provides abstract methods to control the lifecycle of a service +// Every service must implement Service interface. type Service interface { // Name describe service name Name() string @@ -15,23 +16,19 @@ type Service interface { Stop(ctx context.Context) error } -// Lifecycle is the interface to the service life cycle management subsystem. -// The ServiceLifecycle tracks the Service life cycle, listens to the signal -// of the process for graceful exit. +// Lifecycle is an interface to describe how a service is managed. +// The Lifecycle tracks the Service lifecycle, listens for signals from +// the process to ensure a graceful shutdown. // -// All managed services must first call RegisterServices to register with -// ServiceLifecycle. +// All managed services must first call RegisterServices to register with Lifecycle. type Lifecycle interface { // RegisterServices registers service to ServiceLifecycle for managing. RegisterServices(modular ...Service) - // StartServices starts all registered services by calling Service.Start - // method. + // StartServices starts all registered services by calling Service.Start method. StartServices(ctx context.Context) Lifecycle - // StopServices stops all registered services by calling Service.Stop - // method. + // StopServices stops all registered services by calling Service.Stop method. StopServices(ctx context.Context) - // Signals listens the system signals for gracefully stop the registered - // services. + // Signals listens for system signals to gracefully stop the registered services. Signals(sigs ...os.Signal) Lifecycle // Wait waits the signal for stopping the ServiceLifecycle, before stopping // the ServiceLifecycle will call StopServices stops all registered services. diff --git a/core/module/README.md b/core/module/README.md index 79fa626be..807e81de8 100644 --- a/core/module/README.md +++ b/core/module/README.md @@ -1,79 +1,76 @@ -# Modular - -Modular is a complete logical module of SP. The GfSp framework is responsible -for the necessary interaction between modules. As for the implementation -of the module, it can be customized. Example, The GfSp framework stipulates -that ask object approval must be carried out before uploading an object, -whether agrees the approval, SP can be customized. - -# Concept - -## Front Modular -Front Modular handles the user's request, the gater will generate corresponding -task and send to Front Modular, the Front Modular need check the request -is correct. and after handle the task maybe some extra work is required. -So the Front Modular has three interfaces for each task type, `PreHandleXXXTask`, -`HandleXXXTask` and`PostHandleXXXTask`. Front Modular includes: `Approver`, -`Downloader` and `Uploader`. - -## Background Modular -Background Modular handles the SP inner task, since it is internally -generated, the correctness of the information can be guaranteed, so only -have one interface`HandleXXXTask`. Background Modular includes: `Authenticator`, -`TaskExecutor`,`Manager`, `P2P`, `c` and `Signer`.
- - -# Modular Type - -The GfSp framework specifies the following modular: `Gater`, `Approver`, -`Authenticator`, `Uploader`, `Downloader`, `Manager`, `P2P`, `Receiver`, -`Signer`and `Metadata`. The GfSp framework also supports extending more -customized mudolar as needed. As long as it is registered in GfSp framework -and executes the modular interface, it will be initialized and scheduled. - -## Gater -Gater as SP's gateway, provides http service and follows the s3 protocol, -and generates corresponding task and forwards them to other modular in the -SP. It does not allow customization, so no interface is defined in the -modular file. - -## Approver -Approver is the modular to handle ask approval request, handles CreateBucketApproval -and CreateObjectApproval. - -## Authenticator -Authenticator is the modular to authentication verification. - -## Downloader -Downloader is the modular to handle get object request from user account, -and get challenge info request from other components in the system. - -## TaskExecutor -TaskExecutor is the modular to handle background task, it will ask task -from Manager modular, handle the task and report the result or status to -the manager modular includes: ReplicatePieceTask, SealObjectTask, -ReceivePieceTask, GCObjectTask, GCZombiePieceTask, GCMetaTask. - -## Manager -Manager is the modular to SP's manage modular, it is Responsible for task -scheduling and other management of SP. - -## P2P -P2P is the modular to the interaction of control information between Sps, -handles the ask replicate piece approval, it will broadcast the approval -to other SPs, wait the responses, if up to min approved number or max -approved number before timeout, will return the approvals. - -## Receiver -Receiver is the modular to receive the piece data from primary SP, calculates -the integrity hash of the piece data and sign it, returns to the primary SP -for sealing object on greenfield. - -## Signer -Signer is the modular to handle the SP's sign and on greenfield chain operator. -It holds SP all private key. Considering the sp account's sequence number, it -must be a singleton. - -## Uploader -Uploader is the modular to handle put object request from user account, and -store it in primary SP's piece store. \ No newline at end of file +# Module + +The Module is a self-contained logical component of SP, with necessary interactions between modules handled by the GfSp +framework. The implementation of the module can be customized as needed. For instance, while the GfSp framework requires +object approval before uploading, SP can customize whether to agree with the approval. + +## Concept + +### Front Modules + +The Front Modules are responsible for handling user requests. The Gater generates corresponding tasks and sends them to +the Front Modules. The Front Modules verify the correctness of the request and perform additional tasks after handling +the request. To accomplish this, the Front Modules have three interfaces for each task type: `PreHandleXXXTask`, +`HandleXXXTask` and `PostHandleXXXTask`. The Front Modules consist of `Approver`, `Downloader` and `Uploader`. + +### Background Modules + +The Background Modules are responsible for handling internal tasks of SP, which are generated internally and thus have +guaranteed information correctness. As a result, there is only one interface `HandleXXXTask` for these tasks. The Background +Modules consist of `Authenticator`, `TaskExecutor`, `Manager`, `P2P`, `Receiver` and `Signer`. 
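+
+As a rough sketch of the three-interface pattern above (illustrative only: the `handleCreateBucketApproval`
+helper is hypothetical and error handling is simplified; `Approver` and its task type are defined in
+`core/module` and `core/task`):
+
+```go
+// handleCreateBucketApproval drives one approval task through the
+// Pre/Handle/Post hooks after the Gater generates it from a user request.
+func handleCreateBucketApproval(ctx context.Context, approver module.Approver, t task.ApprovalCreateBucketTask) (bool, error) {
+	// Pre: verify the request, e.g. reject duplicates or over-limit asks.
+	if err := approver.PreCreateBucketApproval(ctx, t); err != nil {
+		return false, err
+	}
+	// Handle: the customizable core logic, e.g. set expired height and sign.
+	approved, err := approver.HandleCreateBucketApprovalTask(ctx, t)
+	if err != nil {
+		return false, err
+	}
+	// Post: recycle resources, make statistics and so on.
+	approver.PostCreateBucketApproval(ctx, t)
+	return approved, nil
+}
+```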
+
+### Module Type
+
+The GfSp framework comprises several modules, including `Gater`, `Approver`, `Authenticator`, `Uploader`, `Downloader`,
+`Manager`, `P2P`, `TaskExecutor`, `Receiver`, `Signer`, `Metadata` and `BlockSyncer`. Additionally, the GfSp framework
+supports the extension of customized modules as required. Once registered in the GfSp framework and implementing the
+Modular interface, these customized modules will be initialized and scheduled.
+
+### Gater
+
+Gater module serves as the gateway for SP, providing HTTP services and adhering to the S3 protocol. It generates tasks
+corresponding to user requests and forwards them to other modules within SP. Since Gater does not allow customization,
+no interface is defined in the modular file.
+
+### Authenticator
+
+Authenticator module is responsible for verifying authentication.
+
+### Approver
+
+Approver module is responsible for handling approval requests, specifically `CreateBucketApproval` and `CreateObjectApproval`.
+
+### Uploader
+
+Uploader module handles the put object requests from user accounts and stores payload data into piece store of the primary SP.
+
+### Downloader
+
+Downloader module is responsible for handling get object requests from user accounts and get challenge info requests from
+other components in the Greenfield system.
+
+### TaskExecutor
+
+TaskExecutor module is responsible for handling background tasks. This module can request tasks from the Manager module,
+execute them and report the results or status back to the Manager. The tasks it can handle include ReplicatePieceTask,
+SealObjectTask, ReceivePieceTask, GCObjectTask, GCZombiePieceTask, and GCMetaTask.
+
+### Manager
+
+Manager module is responsible for managing task scheduling of SP and other management functions.
+
+### P2P
+
+P2P module is responsible for handling the interaction of control information between SPs. It handles ask replicate piece
+approval requests by broadcasting the approval to other SPs, waiting for responses and returning the approvals if the
+minimum or maximum approved number is reached before the timeout.
+
+### Receiver
+
+Receiver module receives data from the primary SP, calculates the integrity hash of the data, signs it, and returns it
+to the primary SP for sealing the object on Greenfield.
+
+### Signer
+
+Signer module signs messages on behalf of SP's Greenfield chain operator account and holds all of the SP's private keys.
+Because of the SP account's sequence number, it must be a singleton.
diff --git a/core/module/modular.go b/core/module/modular.go
index 48106001a..62b933692 100644
--- a/core/module/modular.go
+++ b/core/module/modular.go
@@ -4,18 +4,18 @@ import (
 	"context"
 	"io"
 
+	storagetypes "github.com/bnb-chain/greenfield/x/storage/types"
+
 	"github.com/bnb-chain/greenfield-storage-provider/base/types/gfspp2p"
 	"github.com/bnb-chain/greenfield-storage-provider/core/lifecycle"
 	"github.com/bnb-chain/greenfield-storage-provider/core/rcmgr"
 	"github.com/bnb-chain/greenfield-storage-provider/core/spdb"
 	"github.com/bnb-chain/greenfield-storage-provider/core/task"
-	storagetypes "github.com/bnb-chain/greenfield/x/storage/types"
 )
 
-// Modular is the interface to submodule that units scheduled by the GfSp framework.
-// the Modular inherits lifecycle.Service interface, used to managed by lifecycle.
-// and it also is managed by ResourceManager, the GfSp framework will reserve and
-// release resources from Modular resources pool.
+// Modular is a common interface for submodules that are scheduled by the GfSp framework.
+// It inherits lifecycle.Service interface, which is used to manage the lifecycle of services. Additionally, Modular is managed
+// by ResourceManager, which allows the GfSp framework to reserve and release resources from the Modular resource pool.
 type Modular interface {
 	lifecycle.Service
 	// ReserveResource reserves the resources from Modular resources pool.
@@ -48,202 +48,178 @@ const (
 	AuthOpTypeListBucketReadRecord
 )
 
-// Authenticator is the interface to authentication verification modular.
+// Authenticator is an abstract interface to verify users' authentication.
 type Authenticator interface {
 	Modular
 	// VerifyAuthentication verifies the operator authentication.
 	VerifyAuthentication(ctx context.Context, auth AuthOpType, account, bucket, object string) (bool, error)
-	// GetAuthNonce get the auth nonce for which the Dapp or client can generate EDDSA key pairs.
+	// GetAuthNonce gets the auth nonce for which the dApp or client can generate EDDSA key pairs.
 	GetAuthNonce(ctx context.Context, account string, domain string) (*spdb.OffChainAuthKey, error)
-	// UpdateUserPublicKey updates the user public key once the Dapp or client generates the EDDSA key pairs.
-	UpdateUserPublicKey(ctx context.Context, account string, domain string, currentNonce int32, nonce int32, userPublicKey string, expiryDate int64) (bool, error)
+	// UpdateUserPublicKey updates the user public key once the dApp or client generates the EDDSA key pairs.
+	UpdateUserPublicKey(ctx context.Context, account string, domain string, currentNonce int32, nonce int32,
+		userPublicKey string, expiryDate int64) (bool, error)
 	// VerifyOffChainSignature verifies the signature signed by user's EDDSA private key.
 	VerifyOffChainSignature(ctx context.Context, account string, domain string, offChainSig string, realMsgToSign string) (bool, error)
 }
 
-// Approver is the interface to handle ask approval.
+// Approver is an abstract interface to handle ask approval requests.
 type Approver interface {
 	Modular
 	// PreCreateBucketApproval prepares to handle CreateBucketApproval, it can do some
-	// checks Example: check for duplicates, if limit specified by SP is reached, etc.
+	// checks such as checking for duplicates, whether the SP's limits have been reached, etc.
 	PreCreateBucketApproval(ctx context.Context, task task.ApprovalCreateBucketTask) error
-	// HandleCreateBucketApprovalTask handles the CreateBucketApproval, set expired
-	// height and sign the MsgCreateBucket etc.
+	// HandleCreateBucketApprovalTask handles the CreateBucketApproval, it can set expired height, sign the MsgCreateBucket and so on.
 	HandleCreateBucketApprovalTask(ctx context.Context, task task.ApprovalCreateBucketTask) (bool, error)
-	// PostCreateBucketApproval is called after HandleCreateBucketApprovalTask, it can
-	// recycle resources, statistics and other operations.
+	// PostCreateBucketApproval is called after HandleCreateBucketApprovalTask, it can recycle resources, make statistics
+	// and do some other operations.
	PostCreateBucketApproval(ctx context.Context, task task.ApprovalCreateBucketTask)
 	// PreCreateObjectApproval prepares to handle CreateObjectApproval, it can do some
-	// checks Example: check for duplicates, if limit specified by SP is reached, etc.
+	// checks such as checking for duplicates, whether the SP's limits have been reached, etc.
 	PreCreateObjectApproval(ctx context.Context, task task.ApprovalCreateObjectTask) error
-	// HandleCreateObjectApprovalTask handles the MsgCreateObject, set expired height
-	// and sign the MsgCreateBucket etc.
+	// HandleCreateObjectApprovalTask handles the CreateObjectApproval, it can set expired height, sign the MsgCreateObject and so on.
 	HandleCreateObjectApprovalTask(ctx context.Context, task task.ApprovalCreateObjectTask) (bool, error)
 	// PostCreateObjectApproval is called after HandleCreateObjectApprovalTask, it can
-	// recycle resources, statistics and other operations.
+	// recycle resources, make statistics and do some other operations.
 	PostCreateObjectApproval(ctx context.Context, task task.ApprovalCreateObjectTask)
-	// QueryTasks queries tasks that running on approver by task sub key.
+	// QueryTasks queries tasks running on approver by task sub-key.
 	QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
 }
 
-// Downloader is the interface to handle get object request from user account, and get
-// challenge info request from other components in the system.
+// Downloader is an abstract interface to handle getting object requests from users' accounts, and getting
+// challenge info requests from other components in the system.
 type Downloader interface {
 	Modular
 	// PreDownloadObject prepares to handle DownloadObject, it can do some checks
-	// Example: check for duplicates, if limit specified by SP is reached, etc.
+	// such as checking for duplicates, whether the SP's limits have been reached, etc.
 	PreDownloadObject(ctx context.Context, task task.DownloadObjectTask) error
-	// HandleDownloadObjectTask handles the DownloadObject, get data from piece store.
+	// HandleDownloadObjectTask handles the DownloadObject and gets data from piece store.
 	HandleDownloadObjectTask(ctx context.Context, task task.DownloadObjectTask) ([]byte, error)
 	// PostDownloadObject is called after HandleDownloadObjectTask, it can recycle
-	// resources, statistics and other operations.
+	// resources, make statistics and do some other operations.
 	PostDownloadObject(ctx context.Context, task task.DownloadObjectTask)
-	// PreDownloadPiece prepares to handle DownloadPiece, it can do some checks
-	// Example: check for duplicates, if limit specified by SP is reached, etc.
+	// PreDownloadPiece prepares to handle DownloadPiece, it can do some checks such as checking for duplicates,
+	// whether the SP's limits have been reached, etc.
 	PreDownloadPiece(ctx context.Context, task task.DownloadPieceTask) error
-	// HandleDownloadPieceTask handles the DownloadPiece, get data from piece store.
+	// HandleDownloadPieceTask handles the DownloadPiece and gets data from piece store.
 	HandleDownloadPieceTask(ctx context.Context, task task.DownloadPieceTask) ([]byte, error)
 	// PostDownloadPiece is called after HandleDownloadPieceTask, it can recycle
-	// resources, statistics and other operations.
+	// resources, make statistics and do some other operations.
 	PostDownloadPiece(ctx context.Context, task task.DownloadPieceTask)
 	// PreChallengePiece prepares to handle ChallengePiece, it can do some checks
-	// Example: check for duplicates, if limit specified by SP is reached, etc.
+	// such as checking for duplicates, whether the SP's limits have been reached, etc.
 	PreChallengePiece(ctx context.Context, task task.ChallengePieceTask) error
-	// HandleChallengePiece handles the ChallengePiece, get piece data from piece
-	// store and get integrity hash from db.
+	// HandleChallengePiece handles ChallengePiece, gets piece data from piece store and gets integrity hash from db.
 	HandleChallengePiece(ctx context.Context, task task.ChallengePieceTask) ([]byte, [][]byte, []byte, error)
-	// PostChallengePiece is called after HandleChallengePiece, it can recycle
-	// resources, statistics and other operations.
+	// PostChallengePiece is called after HandleChallengePiece, it can recycle resources, make statistics
+	// and do some other operations.
 	PostChallengePiece(ctx context.Context, task task.ChallengePieceTask)
-	// QueryTasks queries download/challenge tasks that running on downloader by
-	// task sub key.
+	// QueryTasks queries download/challenge tasks running on downloader by task sub-key.
 	QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
 }
 
-// TaskExecutor is the interface to handle background task, it will ask task from
-// manager modular, handle the task and report the result or status to the manager
-// modular includes: ReplicatePieceTask, SealObjectTask, ReceivePieceTask, GCObjectTask
-// GCZombiePieceTask, GCMetaTask.
+// TaskExecutor is an abstract interface to handle background tasks.
+// It will ask tasks from manager module, handle tasks and report the results or status to the manager module.
+// It can handle these tasks: ReplicatePieceTask, SealObjectTask, ReceivePieceTask, GCObjectTask, GCZombiePieceTask, GCMetaTask.
 type TaskExecutor interface {
 	Modular
-	// AskTask asks the task by remaining limit from manager modular.
+	// AskTask asks a task from manager module by its remaining resource limits.
 	AskTask(ctx context.Context) error
-	// HandleReplicatePieceTask handles the ReplicatePieceTask that is asked from
-	// manager modular.
+	// HandleReplicatePieceTask handles ReplicatePieceTask that is asked from manager module.
 	HandleReplicatePieceTask(ctx context.Context, task task.ReplicatePieceTask)
-	// HandleSealObjectTask handles the SealObjectTask that is asked from manager
-	// modular.
+	// HandleSealObjectTask handles SealObjectTask that is asked from manager module.
 	HandleSealObjectTask(ctx context.Context, task task.SealObjectTask)
-	// HandleReceivePieceTask handles the ReceivePieceTask that is asked from manager
-	// modular. It will confirm the object that as secondary SP whether has been sealed.
+	// HandleReceivePieceTask handles the ReceivePieceTask that is asked from manager module.
+	// It will confirm whether the piece data that is synced to secondary SP has been sealed.
 	HandleReceivePieceTask(ctx context.Context, task task.ReceivePieceTask)
-	// HandleGCObjectTask handles the GCObjectTask that is asked from manager modular.
+	// HandleGCObjectTask handles the GCObjectTask that is asked from manager module.
 	HandleGCObjectTask(ctx context.Context, task task.GCObjectTask)
-	// HandleGCZombiePieceTask handles the GCZombiePieceTask that is asked from manager
-	// modular.
+	// HandleGCZombiePieceTask handles the GCZombiePieceTask that is asked from manager module.
 	HandleGCZombiePieceTask(ctx context.Context, task task.GCZombiePieceTask)
-	// HandleGCMetaTask handles the GCMetaTask that is asked from manager modular.
+	// HandleGCMetaTask handles the GCMetaTask that is asked from manager module.
 	HandleGCMetaTask(ctx context.Context, task task.GCMetaTask)
-	// ReportTask reports the result or status of running task to manager modular.
+	// ReportTask reports the results or status of running task to manager module.
 	ReportTask(ctx context.Context, task task.Task) error
 }
 
-// Manager is the interface to SP's manage modular, it is Responsible for task
+// Manager is an abstract interface for internal service management, it is responsible for task
 // scheduling and other management of SP.
 type Manager interface {
 	Modular
-	// DispatchTask dispatches the task to TaskExecutor modular when it asks task.
-	// It will consider task remaining resources when dispatches task.
+	// DispatchTask dispatches the task to TaskExecutor module when it asks tasks.
+	// It will consider the remaining resources when dispatching tasks.
 	DispatchTask(ctx context.Context, limit rcmgr.Limit) (task.Task, error)
-	// QueryTasks queries tasks that hold on manager by task sub key.
+	// QueryTasks queries tasks that are held on manager by task sub-key.
 	QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
-	// HandleCreateUploadObjectTask handles the CreateUploadObject request from
-	// Uploader, before Uploader handles the user's UploadObject request, it should
-	// send CreateUploadObject request to Manager ask if it's ok. Through this
-	// interface that SP implements the global upload object strategy.
+	// HandleCreateUploadObjectTask handles the CreateUploadObject request from Uploader, before Uploader handles
+	// the users' UploadObject requests, it should send CreateUploadObject requests to Manager to ask if it's ok.
+	// Through this interface SP implements the global uploading object strategy.
 	//
-	// Example: control the concurrency of global uploads, avoid repeated uploads,
-	// rate control, etc.
+	// For example: control the concurrency of global uploads, avoid repeated uploads, rate control, etc.
 	HandleCreateUploadObjectTask(ctx context.Context, task task.UploadObjectTask) error
-	// HandleDoneUploadObjectTask handles the result of uploading object payload
-	// data to primary, Manager should generate ReplicatePieceTask for TaskExecutor
-	// to run.
+	// HandleDoneUploadObjectTask handles the result of uploading object payload data to primary, Manager should
+	// generate ReplicatePieceTask for TaskExecutor to run.
 	HandleDoneUploadObjectTask(ctx context.Context, task task.UploadObjectTask) error
-	// HandleReplicatePieceTask handles the result of replicating pieces data to
-	// secondary SPs, the request comes from TaskExecutor.
-	HandleReplicatePieceTask(ctx context.Context, task task.ReplicatePieceTask) error
-	// HandleSealObjectTask handles the result of sealing object to the greenfield
+	// HandleReplicatePieceTask handles the result of replicating piece data to secondary SPs,
 	// the request comes from TaskExecutor.
+	HandleReplicatePieceTask(ctx context.Context, task task.ReplicatePieceTask) error
+	// HandleSealObjectTask handles the result of sealing object to the greenfield, the request comes from TaskExecutor.
 	HandleSealObjectTask(ctx context.Context, task task.SealObjectTask) error
-	// HandleReceivePieceTask handles the result of receiving piece task, the request
-	// comes from Receiver that reports have completed the receive task to manager and
-	// TaskExecutor that the result of confirming whether the object as secondary SP
-	// has been sealed.
+	// HandleReceivePieceTask handles the result of receiving piece task. The request comes from Receiver, which
+	// reports that it has completed ReceivePieceTask, and from TaskExecutor, which reports the result of confirming
+	// whether the object that is synced to secondary SP has been sealed.
 	HandleReceivePieceTask(ctx context.Context, task task.ReceivePieceTask) error
-	// HandleGCObjectTask handles the result or status of GCObjectTask, the request
-	// comes from TaskExecutor.
+	// HandleGCObjectTask handles GCObjectTask, the request comes from TaskExecutor.
 	HandleGCObjectTask(ctx context.Context, task task.GCObjectTask) error
-	// HandleGCZombiePieceTask handles the result or status GCZombiePieceTask, the
-	// request comes from TaskExecutor.
+	// HandleGCZombiePieceTask handles GCZombiePieceTask, the request comes from TaskExecutor.
 	HandleGCZombiePieceTask(ctx context.Context, task task.GCZombiePieceTask) error
-	// HandleGCMetaTask handles the result or status GCMetaTask, the request comes
-	// from TaskExecutor.
+	// HandleGCMetaTask handles GCMetaTask, the request comes from TaskExecutor.
 	HandleGCMetaTask(ctx context.Context, task task.GCMetaTask) error
-	// HandleDownloadObjectTask handles the result DownloadObjectTask, the request comes
-	// from Downloader.
+	// HandleDownloadObjectTask handles DownloadObjectTask, the request comes from Downloader.
 	HandleDownloadObjectTask(ctx context.Context, task task.DownloadObjectTask) error
-	// HandleChallengePieceTask handles the result ChallengePieceTask, the request comes
-	// from Downloader.
+	// HandleChallengePieceTask handles ChallengePieceTask, the request comes from Downloader.
 	HandleChallengePieceTask(ctx context.Context, task task.ChallengePieceTask) error
 }
 
-// P2P is the interface to the interaction of control information between Sps.
+// P2P is an abstract interface to handle replicate piece approvals between SPs.
 type P2P interface {
 	Modular
-	// HandleReplicatePieceApproval handles the ask replicate piece approval, it will
-	// broadcast the approval to other SPs, wait the responses, if up to min approved
-	// number or max approved number before timeout, will return the approvals.
-	HandleReplicatePieceApproval(ctx context.Context, task task.ApprovalReplicatePieceTask,
-		min, max int32, timeout int64) ([]task.ApprovalReplicatePieceTask, error)
+	// HandleReplicatePieceApproval handles the ask replicate piece approval, it will
+	// broadcast the approval to other SPs and wait for the responses. If the min or max approved
+	// number is reached before timeout, it will return the approvals.
+	HandleReplicatePieceApproval(ctx context.Context, task task.ApprovalReplicatePieceTask, min, max int32,
		timeout int64) ([]task.ApprovalReplicatePieceTask, error)
 	// HandleQueryBootstrap handles the query p2p node bootstrap node info.
 	HandleQueryBootstrap(ctx context.Context) ([]string, error)
-	// QueryTasks queries replicate piece approval tasks that running on p2p by task
-	// sub key.
+	// QueryTasks queries replicate piece approval tasks running on p2p by task sub-key.
 	QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
 }
 
-// Receiver is the interface to receive the piece data from primary SP.
+// Receiver is an abstract interface to receive the piece data from primary SP.
 type Receiver interface {
 	Modular
-	// HandleReceivePieceTask stores the piece data from primary SP.
+	// HandleReceivePieceTask stores piece data into secondary SP.
 	HandleReceivePieceTask(ctx context.Context, task task.ReceivePieceTask, data []byte) error
-	// HandleDoneReceivePieceTask calculates the integrity hash of the object and sign
-	// it, returns to the primary SP for seal object.
+	// HandleDoneReceivePieceTask calculates the integrity hash of the object, signs it, and returns them to the primary
+	// SP for sealing the object.
 	HandleDoneReceivePieceTask(ctx context.Context, task task.ReceivePieceTask) ([]byte, []byte, error)
-	// QueryTasks queries replicate piece tasks that running on receiver by task sub
-	// key.
+	// QueryTasks queries replicate piece tasks running on receiver by task sub-key.
 	QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
 }
 
-// Signer is the interface to handle the SP's sign and on greenfield chain operator.
-// It holds SP all private key. Considering the sp account's sequence number, it must
-// be a singleton.
+// Signer is an abstract interface to handle the signatures of SP and its operations on the greenfield chain.
+// It holds all private keys of one SP. Considering the SP account's sequence number, it must be a singleton.
 type Signer interface {
 	Modular
-	// SignCreateBucketApproval signs the MsgCreateBucket for asking create bucket
-	// approval.
+	// SignCreateBucketApproval signs the MsgCreateBucket for asking create bucket approval.
 	SignCreateBucketApproval(ctx context.Context, bucket *storagetypes.MsgCreateBucket) ([]byte, error)
-	// SignCreateObjectApproval signs the MsgCreateObject for asking create object
-	// approval.
+	// SignCreateObjectApproval signs the MsgCreateObject for asking create object approval.
 	SignCreateObjectApproval(ctx context.Context, task *storagetypes.MsgCreateObject) ([]byte, error)
-	// SignReplicatePieceApproval signs the ApprovalReplicatePieceTask for asking
-	// replicate pieces to secondary SPs.
+	// SignReplicatePieceApproval signs the ApprovalReplicatePieceTask for asking replicate pieces to secondary SPs.
 	SignReplicatePieceApproval(ctx context.Context, task task.ApprovalReplicatePieceTask) ([]byte, error)
-	// SignReceivePieceTask signs the ReceivePieceTask for replicating pieces data
-	// between SPs.
+	// SignReceivePieceTask signs the ReceivePieceTask for replicating pieces data between SPs.
 	SignReceivePieceTask(ctx context.Context, task task.ReceivePieceTask) ([]byte, error)
 	// SignIntegrityHash signs the integrity hash of object for sealing object.
 	SignIntegrityHash(ctx context.Context, objectID uint64, hash [][]byte) ([]byte, []byte, error)
@@ -259,20 +235,18 @@ type Signer interface {
 	DiscontinueBucket(ctx context.Context, bucket *storagetypes.MsgDiscontinueBucket) error
 }
 
-// Uploader is the interface to handle put object request from user account, and
-// store it in primary SP's piece store.
+// Uploader is an abstract interface to handle putting object requests from users' accounts and store
+// their payload data into the primary SP's piece store.
 type Uploader interface {
 	Modular
 	// PreUploadObject prepares to handle UploadObject, it can do some checks
-	// Example: check for duplicates, if limit specified by SP is reached, etc.
+	// such as checking for duplicates, whether the SP's limits have been reached, etc.
 	PreUploadObject(ctx context.Context, task task.UploadObjectTask) error
-	// HandleUploadObjectTask handles the UploadObject, store the payload data
-	// to piece store by data stream.
+	// HandleUploadObjectTask handles the UploadObject, stores payload data into piece store by data stream.
 	HandleUploadObjectTask(ctx context.Context, task task.UploadObjectTask, stream io.Reader) error
 	// PostUploadObject is called after HandleUploadObjectTask, it can recycle
-	// resources, statistics and other operations.
+	// resources, make statistics and do some other operations.
 	PostUploadObject(ctx context.Context, task task.UploadObjectTask)
-	// QueryTasks queries upload object tasks that running on uploading by task
-	// sub key.
+	// QueryTasks queries upload object tasks running on uploader by task sub-key.
 	QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
 }
diff --git a/core/module/null_modular.go b/core/module/null_modular.go
index 3b7a4d584..77b427553 100644
--- a/core/module/null_modular.go
+++ b/core/module/null_modular.go
@@ -5,11 +5,12 @@ import (
 	"errors"
 	"io"
 
+	storagetypes "github.com/bnb-chain/greenfield/x/storage/types"
+
 	"github.com/bnb-chain/greenfield-storage-provider/base/types/gfspp2p"
 	"github.com/bnb-chain/greenfield-storage-provider/core/rcmgr"
 	corespdb "github.com/bnb-chain/greenfield-storage-provider/core/spdb"
 	"github.com/bnb-chain/greenfield-storage-provider/core/task"
-	storagetypes "github.com/bnb-chain/greenfield/x/storage/types"
 )
 
 var (
diff --git a/core/piecestore/piecestore.go b/core/piecestore/piecestore.go
index 077b7b695..8a2ed9699 100644
--- a/core/piecestore/piecestore.go
+++ b/core/piecestore/piecestore.go
@@ -4,7 +4,7 @@ import (
 	"context"
 )
 
-// PieceOp is the helper interface for piece key operator and piece size calculate.
+// PieceOp is a helper interface for piece key operations and piece size calculation.
 type PieceOp interface {
 	// SegmentPieceKey returns the segment piece key used as the key of store piece store.
 	SegmentPieceKey(objectID uint64, segmentIdx uint32) string
@@ -27,7 +27,7 @@ type PieceOp interface {
 	ECPieceSize(payloadSize uint64, segmentIdx uint32, maxSegmentSize uint64, chunkNum uint32) int64
 }
 
-// PieceStore is the interface to piece store that store the object payload data.
+// PieceStore is an abstract interface to piece store that stores the object payload data.
 type PieceStore interface {
 	// GetPiece returns the piece data from piece store by piece key.
 	// the piece can segment or ec piece key.
diff --git a/core/rcmgr/README.md b/core/rcmgr/README.md
index 3f2f2b212..88a94c953 100644
--- a/core/rcmgr/README.md
+++ b/core/rcmgr/README.md
@@ -1,44 +1,35 @@
 # ResourceManager
 
-ResourceManager is the interface to the resource management subsystem.
-The ResourceManager tracks and accounts for resource usage in the stack,
-from the internals to the application, and provides a mechanism to limit
-resource usage according to a user configurable policy.
+ResourceManager interface manages resources within SP system, tracking and accounting for usage across the stack,
+from internal components to applications. It also allows for resource usage to be limited based on user-configurable policies.
 
-# Concept
+## Concept
 
-## Limit
+### Limit
 
-The modus operandi of the resource manager is to restrict resource usage
-at the time of reservation. When a component of the stack needs to use a
-resource, it reserves it in the appropriate scope. The resource manager
-gates the reservation against the scope applicable limits; if the limit is
-exceeded, then an error is up the component to act accordingly. At the lower
-levels of the stack, this will normally signal a failure of some sorts, like
-failing to opening a connection, which will propagate to the programmer. Some
-components may be able to handle resource reservation failure more gracefully.
+The modus operandi of the resource manager is to restrict resource usage at the time of reservation. When a component of the stack
+needs to use a resource, it reserves it in the appropriate scope.
The resource manager gates the reservation against the
+scope's applicable limits; if the limit is exceeded, an error is returned and it is up to the component to act accordingly. At the lower
+levels of the stack, this will normally signal a failure of some sort, like failing to open a connection, which will
+propagate to the programmer. Some components may be able to handle resource reservation failure more gracefully.
 
-The `Limit` is configured by the user policy. supports `MemoryLimit`,`ConnectionsLimit`,
-`FDLimit(file descriptor limit)`, `TaskLimit`. The `MemoryLimit` limits maximum memory
-for a component reservation. The `ConnectionsLimit` limits maximum connections number
-for a component reservation, includes inbound and outbound connections. The `FDLimit`
-limits maximum fd number for a component reservation, includes the open of sockets and
-files.
+The `Limit` is configured by the user policy and supports `MemoryLimit`, `ConnectionsLimit`, `FDLimit` (file descriptor limit) and
+`TaskLimit`. The `MemoryLimit` limits the maximum memory for a component reservation. The `ConnectionsLimit` limits the maximum
+number of connections for a component reservation, including inbound and outbound connections. The `FDLimit` limits the maximum
+number of fds for a component reservation, including open sockets and files.
 
-The `TaskLimit` is unique to SP. Task is the smallest unit of SP internal component
-interaction. Each component can limit the number of tasks executed. Tasks are divided
-into high, medium and low priorities, the priority can be used as an important basis
-for task scheduling within the SP. The higher the priority, the faster it is expected
-to be executed, and the resources will be assigned priority for execution, for example:
-seal object. The lower the priority, it can be executed later, and the resource
-requirements are not so urgent, for example: delayed deletion.
+The `TaskLimit` is unique to SP. Task is the smallest unit of SP internal component interaction. Each component can limit
+the number of tasks executed. Tasks are divided into high, medium and low priorities, and the priority can be used as an
+important basis for task scheduling within SP. The higher the priority, the faster it is expected to be executed, and
+the more preferentially resources will be assigned for its execution, such as seal object. The lower the priority, the later
+it can be executed, and the less urgent its resource requirements are, such as delayed deletion.
 
-## Scope
+### Scope
 
-Resource Management through the ResourceManager is based on the concept of Resource
-Management Scopes, whereby resource usage is constrained by a DAG of scopes, The following
-diagram illustrates the structure of the resource constraint DAG:
-```
+Resource Management through the ResourceManager is based on the concept of Resource Management Scopes, whereby resource
+usage is constrained by a DAG of scopes. The following diagram illustrates the structure of the resource constraint DAG:
+
+```text
 System(Topmost Scope)
 +------------> Transient(Scope)........................+................+
 +--> Service(Scope).....................+. | . . +
 +---> Connection/Memory/Task---------- . ----------+ .
 ```
 
-Scope is an important node in DAG, Scope has children and siblings scopes. There is a
-directed edge between them. The children scopes share the limit of the parent scope, the
-child scope reserves the resources, the parent scope will reduce the corresponding amount
-of resources.
Sibling scopes also have directionality, for example Service A Scope depends
-on(points to) System Scope, Service A reserves the resources, the System Scope will reduce
-the corresponding amount of resources. On the contrary, if the System Scope reserves resources,
-it will not affect Service A.
+Scope is an important node in the DAG; a scope has children and sibling scopes. There is a directed edge between them. The
+children scopes share the limit of the parent scope: when a child scope reserves resources, the parent scope will reduce
+the corresponding amount of resources. Sibling scopes also have directionality, for example Service A Scope depends
+on (points to) System Scope: when Service A reserves resources, the System Scope will reduce the corresponding amount of
+resources. On the contrary, if the System Scope reserves resources, it will not affect Service A.
+
+## Example
 
-# Example
 ```go
 rcmgr := &ResourceManager{}
 serviceScope, err := rcmgr.OpenService(...)
@@ -69,4 +59,4 @@ it will not affect Service A.
 	if err := s.ReserveMemory(...); err != nil { ... }
 	// ... use memory
-```
\ No newline at end of file
+```
diff --git a/core/rcmgr/rcmgr.go b/core/rcmgr/rcmgr.go
index 72b3f3be2..2f7b994dc 100644
--- a/core/rcmgr/rcmgr.go
+++ b/core/rcmgr/rcmgr.go
@@ -29,7 +29,7 @@ import (
 // file descriptors and task. These account for both space and time used by the stack,
 // as each resource has a direct effect on the system availability and performance.
 //
-// The modus operandi of the resource manager is to restrict resource usage at the
+// The modus operandi of the resource manager is to restrict resource usage at the
 // time of reservation. When a component of the stack needs to use a resource, it
 // reserves it in the appropriate scope. The resource manager gates the reservation
 // against the scope applicable limits; if the limit is exceeded, then an error is up
@@ -75,7 +75,7 @@ type ResourceManager interface {
 
 // ResourceScopeViewer is a mixin interface providing view methods for accessing top level scopes
 type ResourceScopeViewer interface {
-	// ViewSystem views the system wide resource scope.
+	// ViewSystem views the system-wide resource scope.
 	// The system scope is the top level scope that accounts for global
 	// resource usage at all levels of the system. This scope constrains all
 	// other scopes and institutes global hard limits.
diff --git a/core/task/README.md b/core/task/README.md
index 140952a92..c4e77e6a6 100644
--- a/core/task/README.md
+++ b/core/task/README.md
@@ -1,156 +1,135 @@
 # Task
 
-Task is the interface to the smallest unit of SP background service interaction.
-
-# Concept
+Task is an abstract interface that describes the smallest unit of interaction within SP background services.
 
 ## Task Type
 
-There are three main types of task, ApprovalTask, ObjectTask and GCTask.
-
-The ApprovalTask is used to record the ask approval information, for user
-creating bucket and object need ask primary SP approval if willing serve
-the bucket and object, the SP will sign the approval msg if it approved
-the msg, and the greenfield will verify the signature of the approval msg
-to judge whether SP accepts the bucket and object, for primary replicating
-pieces to the secondary SPs need broadcast the approval msg to other SPs,
-if they approved the msg, the primary SP will pick up some of them that
-approved the msg and replicate the pieces to the these, and they will verify
-the signature of the approval msg before receive the pieces.
so the
-ApprovalTask includes ApprovalCreateBucketTask, ApprovalCreateBucketTask and
-ApprovalReplicatePieceTask.
-
-The ObjectTask associated with an object, and records the information of
-different stages of the object, includes UploadObjectTask stands upload the
-object payload data to the primary SP, ReplicatePieceTask stands replicate
-the object pieces to the secondary SPs, ReceivePieceTask only belong to the
-secondary SP, records the information of receiving piece and the secondary SP
-use it to confirm the object if success to seal on the greenfield, this will
-guarantee a return of the secondary SP. SealObjectTask stands seal object on
-the greenfield, DownloadObjectTask stands the user download the part or all
-object payload data, ChallengePieceTask stands the validator get the challenge
-piece info, the validator to challenge the SP if store the user's payload data
-correctly by this way.
-
-The GCTask is the interface to record the information of garbage collection,
-includes GCObjectTask stands the collection of piece store space by deleting
-the payload data that has been deleted on the greenfield, GCZombiePieceTask
-stands the collection of piece store space by deleting zombie pieces data that
-dues to any exception, the piece data meta is not on the greenfield, GCMetaTask
-stands the collection of the SP meta store space by deleting the expired data.
+There are three main types of task: ApprovalTask, ObjectTask and GCTask.
+
+ApprovalTask is used to record approval information for users creating buckets and objects. Primary SP approval is
+required before serving the bucket and object. If SP approves the message, it will sign the approval message. The
+greenfield will verify the signature of the approval message to determine whether the SP accepts the bucket and object.
+When the primary SP replicates pieces to secondary SPs, the approval message is broadcast to other SPs. If they approve the
+message, the primary SP will select some of them to replicate the pieces to. Before receiving the pieces, the selected SPs
+will verify the signature of the approval message. ApprovalTask includes ApprovalCreateBucketTask, ApprovalCreateObjectTask
+and ApprovalReplicatePieceTask.
+
+ObjectTask is associated with an object and records information about its different stages. This includes
+UploadObjectTask, which uploads the object payload data to the primary SP, ReplicatePieceTask, which replicates the
+object pieces to the secondary SPs, and the ReceivePieceTask, which is exclusive to the secondary SP and records
+information about receiving the piece. The secondary SP uses this information to confirm whether the object was
+successfully sealed on the greenfield, ensuring a return for the secondary SP. SealObjectTask seals the object on Greenfield,
+while the DownloadObjectTask allows the user to download part or all of the object payload data. ChallengePieceTask
+provides the validator with challenge piece information, which they can use to challenge the SP if they suspect that
+the user's payload data was not stored correctly.
+
+GCTask is an abstract interface that records information about garbage collection. This includes GCObjectTask,
+which collects piece store space by deleting payload data that has been deleted on the greenfield, GCZombiePieceTask,
+which collects piece store space by deleting zombie piece data that resulted from any exception where the piece data
+meta is not on the Greenfield chain, and GCMetaTask, which collects the SP meta store space by deleting expired data.
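+
+As a rough illustration of this taxonomy, a component that receives a generic task can branch on its type
+(a minimal, hypothetical sketch; the `dispatch` function is not part of the SP codebase, and the type
+constants follow the naming in `core/task/const.go`):
+
+```go
+// dispatch routes a generic task to the matching handler by its task type.
+func dispatch(ctx context.Context, t task.Task) {
+	switch t.Type() {
+	case task.TypeTaskCreateBucketApproval, task.TypeTaskCreateObjectApproval:
+		// ApprovalTask family: decide whether to sign the approval message.
+	case task.TypeTaskUpload, task.TypeTaskReplicatePiece, task.TypeTaskSealObject:
+		// ObjectTask family: advance the object through its stages.
+	default:
+		// GCTask family and others: reclaim piece store or meta store space.
+	}
+}
+```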
 ### Approval Task
 
-ApprovalTask is the interface to record the ask approval information, the
-approval task timeliness uses the block height, if reached expired height,
-the approval invalid.
+ApprovalTask is an abstract interface to record the ask approval information. The approval task uses the block height for
+timeliness; once the expired height is reached, the approval becomes invalid.
+
+#### ApprovalCreateBucketTask
 
-#### ApprovalCreateBucketTask
-ApprovalCreateBucketTask is the interface to record the ask create bucket
-approval information. The user account will create MsgCreateBucket, the SP
-should decide whether approved the request based on the MsgCreateBucket.
-If so, the sp will SetExpiredHeight and signs the MsgCreateBucket.
+ApprovalCreateBucketTask is an abstract interface to record the ask create bucket approval information. The user account will
+create MsgCreateBucket, the SP should decide whether to approve the request based on the MsgCreateBucket. If so, the SP
+will SetExpiredHeight and sign the MsgCreateBucket.
 
 #### ApprovalCreateObjectTask
-ApprovalCreateObjectTask is the interface to record the ask create object
-approval information. The user account will create MsgCreateObject, the SP
-should decide whether approved the request based on the MsgCreateObject.
-If so, the sp will SetExpiredHeight and signs the MsgCreateObject.
+
+ApprovalCreateObjectTask is an abstract interface to record the ask create object approval information. The user account will
+create MsgCreateObject, the SP should decide whether to approve the request based on the MsgCreateObject. If so, the SP
+will SetExpiredHeight and sign the MsgCreateObject.
 
 #### ApprovalReplicatePieceTask
-ApprovalReplicatePieceTask is the interface to record the ask replicate pieces
-to other SPs(as secondary SP for the object). It is initiated by the primary SP
-in the replicate pieces phase. Before the primary SP sends it to other SPs, the
-primary SP will sign the task, other SPs will verify it is sent by a legitimate
-SP. If other SPs approved the approval, they will SetExpiredHeight and signs the
-ApprovalReplicatePieceTask.
+
+ApprovalReplicatePieceTask is an abstract interface to record the ask replicate pieces to other SPs (as secondary SPs for the object).
+It is initiated by the primary SP in the replicate pieces phase. Before the primary SP sends it to other SPs, the primary
+SP will sign the task, other SPs will verify it is sent by a legitimate SP. If other SPs approve it, they will
+SetExpiredHeight and sign the ApprovalReplicatePieceTask.
 
 ### Object Task
 
-The ObjectTask associated with an object and storage params, and records the
-information of different stages of the object. Considering the change of storage
-params on the greenfield, the storage params of each object should be determined
-when it is created, and it should not be queried during the task flow, which is
-inefficient and error-prone.
+The ObjectTask is associated with an object and storage params, and records the information of different stages of the object.
+Considering the change of storage params on the greenfield, the storage params of each object should be determined when
+it is created, and it should not be queried during the task flow, which is inefficient and error-prone.
 
 #### UploadObjectTask
-The UploadObjectTask is the interface to record the information for uploading
-object payload data to the primary SP.
+
+The UploadObjectTask is an abstract interface to record the information for uploading object payload data to the primary SP.
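+
+Because the storage params are pinned at creation time, initializing an upload task is typically a single call
+(a minimal sketch; `objectInfo`, `params` and `timeout` are assumed to be already resolved, and `uploadTask`
+stands for any concrete UploadObjectTask implementation):
+
+```go
+// The object info and storage params are captured once here and must not be
+// re-queried or mutated during the rest of the task flow.
+uploadTask.InitUploadObjectTask(objectInfo, params, timeout)
+```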
 #### ReplicatePieceTask
-The ReplicatePieceTask is the interface to record the information for replicating
-pieces of object payload data to secondary SPs.
+
+The ReplicatePieceTask is an abstract interface to record the information for replicating pieces of object payload data to secondary SPs.
 
 #### ReceivePieceTask
-The ReceivePieceTask is the interface to record the information for receiving
-pieces of object payload data from primary SP, it exists only in secondary SP.
+
+The ReceivePieceTask is an abstract interface to record the information for receiving pieces of object payload data from the primary
+SP; it exists only in the secondary SP.
 
 #### SealObjectTask
-The SealObjectTask is the interface to record the information for sealing object
-to the Greenfield chain.
+
+The SealObjectTask is an abstract interface to record the information for sealing object to the Greenfield chain.
 
 #### DownloadObjectTask
-The DownloadObjectTask is the interface to record the information for downloading
-pieces of object payload data.
+
+The DownloadObjectTask is an abstract interface to record the information for downloading pieces of object payload data.
 
 #### ChallengePieceTask
 
-ChallengePieceTask is the interface to record the information for get challenge
-piece info, the validator get challenge info to confirm whether the sp stores
-the user's data correctly.
+ChallengePieceTask is an abstract interface to record the information for getting challenge piece info. The validator gets challenge
+info to confirm whether the SP stores the user's data correctly.
 
 ### GC Task
 
 #### GCObjectTask
-The GCObjectTask is the interface to record the information for collecting the
-piece store space by deleting object payload data that the object has been deleted
-on the Greenfield chain.
+
+The GCObjectTask is an abstract interface to record the information for collecting the piece store space by deleting the payload
+data of objects that have been deleted on the Greenfield chain.
 
 #### GCZombiePieceTask
-The GCZombiePieceTask is the interface to record the information for collecting
-the piece store space by deleting zombie pieces data that dues to any exception,
-the piece data meta is not on chain but the pieces has been store in piece store.
+
+The GCZombiePieceTask is an abstract interface to record the information for collecting the piece store space by deleting zombie
+piece data that results from any exception where the piece data meta is not on chain but the pieces have been stored in piece store.
 
 #### GCMetaTask
 
-The GCMetaTask is the interface to record the information for collecting the SP
-meta store space by deleting the expired data.
+The GCMetaTask is an abstract interface to record the information for collecting the SP meta store space by deleting the expired data.
 
 ## Task Priority
 
-Each type of task has a priority, the range of priority is [0, 255], the higher
-the priority, the higher the urgency to be executed, the greater the probability
-of being executed by priority scheduling.
-
+Each type of task has a priority, the range of priority is [0, 255]: the higher the priority, the higher the urgency to
+be executed, and the greater the probability of being executed by priority scheduling.
 
 ## Task Priority Level
 
-Task priority is divided into three levels, TLowPriorityLevel, TMediumPriorityLevel,
-THighPriorityLevel. The TLowPriorityLevel default priority range is [0, 85), The
-TMediumPriorityLevel default priority range is [85, 170), The THighPriorityLevel
-default priority range is [170, 256).
When allocating for task execution resources
-from ResourceManager, the resources are allocated according to task priority level,
-but not task priority, because task priority up to 256 levels, the task priority
+Task priority is divided into three levels: TLowPriorityLevel, TMediumPriorityLevel, THighPriorityLevel. The TLowPriorityLevel
+default priority range is [0, 85), the TMediumPriorityLevel default priority range is [85, 170), and the THighPriorityLevel
+default priority range is [170, 256). When allocating task execution resources from ResourceManager, the resources
+are allocated according to task priority level, not task priority, because task priority has up to 256 levels; the task priority
 level makes resource management easier.
-```go
-	Example:
-	the resource limit configuration of task execution node :
-	[TasksHighPriority: 30, TasksMediumPriority: 20, TasksLowPriority: 2]
-	the executor of the task can run 30 high level tasks at the same time that the
-	task priority between [170, 255]
-	the executor of the task can run 20 medium level tasks at the same time that the
-	task priority between [85, 170)
-	the executor of the task can run 2 medium level tasks at the same time that the
-	task priority < 85
+
+```text
+Example:
+	the resource limit configuration of task execution node :
+	[TasksHighPriority: 30, TasksMediumPriority: 20, TasksLowPriority: 2]
+	the executor of the task can run 30 high level tasks at the same time, with
+	task priority between [170, 255]
+	the executor of the task can run 20 medium level tasks at the same time, with
+	task priority between [85, 170)
+	the executor of the task can run 2 low level tasks at the same time, with
+	task priority < 85
 ```
 
 ## Task Init
 
-Each task needs to call its InitXXXTask method before use. This method requires passing
-in the necessary parameters of each type of task. These parameters will not be changed
-in most cases and are necessary, such as task priority, timeout, max retries, and
-necessary information for resource estimation.
+Each task needs to call its InitXXXTask method before use. This method requires passing in the necessary parameters of
+each type of task. These parameters will not be changed in most cases and are necessary, such as task priority, timeout,
+max retries, and necessary information for resource estimation.
 
-Any changes to initialization parameters during task execution may cause unpredictable
-consequences. For example, changes in parameters that affect resource estimation may
-lead to OOM, etc.
+Any changes to initialization parameters during task execution may cause unpredictable consequences. For example, changes
+in parameters that affect resource estimation may cause OOM, etc.
diff --git a/core/task/const.go b/core/task/const.go
index d4869a491..807b44fdc 100644
--- a/core/task/const.go
+++ b/core/task/const.go
@@ -25,8 +25,7 @@ const (
 	TypeTaskReplicatePieceApproval
 	// TypeTaskUpload defines the type of uploading object to primary SP task.
 	TypeTaskUpload
-	// TypeTaskReplicatePiece defines the type of replicating pieces to secondary SPs
-	// task.
+	// TypeTaskReplicatePiece defines the type of replicating pieces to secondary SPs task.
 	TypeTaskReplicatePiece
 	// TypeTaskSealObject defines the type of sealing object to the chain task.
TypeTaskSealObject diff --git a/core/task/null_task.go b/core/task/null_task.go index 2d79fc605..e5d8bfd92 100644 --- a/core/task/null_task.go +++ b/core/task/null_task.go @@ -1,8 +1,9 @@ package task import ( - "github.com/bnb-chain/greenfield-storage-provider/core/rcmgr" storagetypes "github.com/bnb-chain/greenfield/x/storage/types" + + "github.com/bnb-chain/greenfield-storage-provider/core/rcmgr" ) var _ Task = (*NullTask)(nil) diff --git a/core/task/task.go b/core/task/task.go index c739c0157..3111d1da9 100644 --- a/core/task/task.go +++ b/core/task/task.go @@ -1,86 +1,20 @@ package task import ( - "github.com/bnb-chain/greenfield-storage-provider/core/rcmgr" storagetypes "github.com/bnb-chain/greenfield/x/storage/types" + + "github.com/bnb-chain/greenfield-storage-provider/core/rcmgr" ) -// Task is the interface to the smallest unit of SP service interaction. -// -// Task Type: -// -// There are three main types of task, ApprovalTask, ObjectTask and GCTask. -// The ApprovalTask is used to record the ask approval information, for user -// creating bucket and object need ask primary SP approval if willing serve -// the bucket and object, the SP will sign the approval msg if it approved -// the msg, and the greenfield will verify the signature of the approval msg -// to judge whether SP accepts the bucket and object, for primary replicating -// pieces to the secondary SPs need broadcast the approval msg to other SPs, -// if they approved the msg, the primary SP will pick up some of them that -// approved the msg and replicate the pieces to the these, and they will verify -// the signature of the approval msg before receive the pieces. so the -// ApprovalTask includes ApprovalCreateBucketTask, ApprovalCreateBucketTask and -// ApprovalReplicatePieceTask. -// The ObjectTask associated with an object, and records the information of -// different stages of the object, includes UploadObjectTask stands upload the -// object payload data to the primary SP, ReplicatePieceTask stands replicate -// the object pieces to the secondary SPs, ReceivePieceTask only belong to the -// secondary SP, records the information of receiving piece and the secondary SP -// use it to confirm the object if success to seal on the greenfield, this will -// guarantee a return of the secondary SP. SealObjectTask stands seal object on -// the greenfield, DownloadObjectTask stands the user download the part or all -// object payload data, ChallengePieceTask stands the validator get the challenge -// piece info, the validator to challenge the SP if store the user's payload data -// correctly by this way. -// The GCTask is the interface to record the information of garbage collection, -// includes GCObjectTask stands the collection of piece store space by deleting -// the payload data that has been deleted on the greenfield, GCZombiePieceTask -// stands the collection of piece store space by deleting zombie pieces data that -// dues to any exception, the piece data meta is not on the greenfield, GCMetaTask -// stands the collection of the SP meta store space by deleting the expired data. -// -// Task Priority: -// -// Each type of task has a priority, the range of priority is [0, 255], the higher -// the priority, the higher the urgency to be executed, the greater the probability -// of being executed by priority scheduling. -// -// Task Priority Level: -// -// Task priority is divided into three levels, TLowPriorityLevel, TMediumPriorityLevel, -// THighPriorityLevel. 
The TLowPriorityLevel default priority range is [0, 85), The
-// TMediumPriorityLevel default priority range is [85, 170), The THighPriorityLevel
-// default priority range is [170, 256). When allocating for task execution resources
-// from ResourceManager, the resources are allocated according to task priority level,
-// but not task priority, because task priority up to 256 levels, the task priority
-// level make resource management easier.
-// Example:
-//	the resource limit configuration of task execution node :
-//	[TasksHighPriority: 30, TasksMediumPriority: 20, TasksLowPriority: 2]
-//	the executor of the task can run 30 high level tasks at the same time that the
-//	task priority >= 170
-//	the executor of the task can run 20 medium level tasks at the same time that the
-//	task priority between [85, 170)
-//	the executor of the task can run 2 medium level tasks at the same time that the
-//	task priority < 85
-//
-// Task Init:
-//
-// Each task needs to call its InitXXX method before use. This method requires passing
-// in the necessary parameters of each type of task. These parameters will not be changed
-// in most cases and are necessary, such as task priority, timeout, max retries, and
-// necessary information for resource estimation.
-// Any changes to initialization parameters during task execution may cause unpredictable
-// consequences. For example, changes in parameters that affect resource estimation may
-// lead to OOM, etc.
+// Task is an abstract interface that describes the smallest unit of interaction within SP services.
 type Task interface {
 	// Key returns the uniquely identify of the task. It is recommended that each task
 	// has its own prefix. In addition, it should also include the information of the
 	// task's own identity.
-	// Example:
-	// the ApprovalTask maybe includes the bucket and object name,
-	// the ObjectTask maybe includes the object ID,
-	// the GCTask maybe includes the timestamp.
+	// For example:
+	// 1. ApprovalTask maybe includes the bucket name and object name,
+	// 2. ObjectTask maybe includes the object ID,
+	// 3. GCTask maybe includes the timestamp.
 	Key() TKey
 	// Type returns the type of the task. A task has a unique type, such as
 	// TypeTaskCreateBucketApproval, TypeTaskUpload etc. has the only one TType
@@ -149,9 +83,9 @@ type Task interface {
 	SetError(error)
 }
 
-// ApprovalTask is the interface to record the ask approval information, the
-// approval task timeliness uses the block height, if reached expired height,
-// the approval invalid.
+// ApprovalTask is an abstract interface to record the ask approval information.
+// ApprovalTask uses block height to verify whether the approval is expired.
+// If the expired height is reached, the approval becomes invalid.
 type ApprovalTask interface {
 	Task
 	// GetExpiredHeight returns the expired height of the approval.
@@ -163,14 +97,14 @@ type ApprovalTask interface {
 	SetExpiredHeight(uint64)
 }
 
-// ApprovalCreateBucketTask is the interface to record the ask create bucket
-// approval information. The user account will create MsgCreateBucket, the SP
+// ApprovalCreateBucketTask is an abstract interface to record the ask create bucket approval information.
+// The user account will create MsgCreateBucket, SP
 // should decide whether approved the request based on the MsgCreateBucket.
-// If so, the sp will SetExpiredHeight and signs the MsgCreateBucket.
+// If so, SP will SetExpiredHeight and sign the MsgCreateBucket.
type ApprovalCreateBucketTask interface {
 	ApprovalTask
 	// InitApprovalCreateBucketTask inits the ApprovalCreateBucketTask by
-	// MsgCreateBucket and task priority. the SP only fill the MsgCreateBucket's
+	// MsgCreateBucket and task priority. SP only fills the MsgCreateBucket's
 	// PrimarySpApproval field, can not change other fields.
 	InitApprovalCreateBucketTask(*storagetypes.MsgCreateBucket, TPriority)
 	// GetCreateBucketInfo returns the user's MsgCreateBucket.
@@ -180,14 +114,14 @@ type ApprovalCreateBucketTask interface {
 	SetCreateBucketInfo(*storagetypes.MsgCreateBucket)
 }
 
-// ApprovalCreateObjectTask is the interface to record the ask create object
-// approval information. The user account will create MsgCreateObject, the SP
+// ApprovalCreateObjectTask is an abstract interface to record the ask create object
+// approval information. The user account will create MsgCreateObject, SP
 // should decide whether approved the request based on the MsgCreateObject.
-// If so, the sp will SetExpiredHeight and signs the MsgCreateObject.
+// If so, SP will SetExpiredHeight and sign the MsgCreateObject.
 type ApprovalCreateObjectTask interface {
 	ApprovalTask
 	// InitApprovalCreateObjectTask inits the ApprovalCreateObjectTask by
-	// MsgCreateObject and task priority. the SP only fill the MsgCreateObject's
+	// MsgCreateObject and task priority. SP only fills the MsgCreateObject's
 	// PrimarySpApproval field, can not change other fields.
 	InitApprovalCreateObjectTask(*storagetypes.MsgCreateObject, TPriority)
 	// GetCreateObjectInfo returns the user's MsgCreateObject.
@@ -197,7 +131,7 @@ type ApprovalCreateObjectTask interface {
 	SetCreateObjectInfo(*storagetypes.MsgCreateObject)
 }
 
-// ApprovalReplicatePieceTask is the interface to record the ask replicate pieces
+// ApprovalReplicatePieceTask is an abstract interface to record the ask replicate pieces
 // to other SPs(as secondary SP for the object). It is initiated by the primary SP
 // in the replicate pieces phase. Before the primary SP sends it to other SPs, the
 // primary SP will sign the task, other SPs will verify it is sent by a legitimate
 // SP.
@@ -208,44 +142,44 @@ type ApprovalReplicatePieceTask interface {
 	ApprovalTask
 	// InitApprovalReplicatePieceTask inits the ApprovalReplicatePieceTask by ObjectInfo,
 	// storage params, task priority and primary operator address. the storage params
-	// can affect the size of the data accepted by the secondary SP, so this is a necessary
+	// can affect the size of the data accepted by secondary SP, so this is a necessary
 	// and cannot be changed parameter.
 	InitApprovalReplicatePieceTask(object *storagetypes.ObjectInfo, params *storagetypes.Params, priority TPriority, askOpAddress string)
-	// GetAskSpOperatorAddress returns the SP's operator address that initiated the ask
+	// GetAskSpOperatorAddress returns the operator address of the SP that initiated the ask
 	// replicate piece approval request.
 	GetAskSpOperatorAddress() string
-	// SetAskSpOperatorAddress sets the SP's operator address that initiated the ask
+	// SetAskSpOperatorAddress sets the operator address of the SP that initiated the ask
 	// replicate piece approval request. Should try to avoid calling this method,
 	// it will change the approval information.
 	SetAskSpOperatorAddress(string)
-	// GetAskSignature returns the initiated SP's signature by its operator private key.
+	// GetAskSignature returns the signature of the initiating SP, signed by its operator private key.
GetAskSignature() []byte
-	// SetAskSignature sets the initiated SP's signature by its operator private key.
+	// SetAskSignature sets the signature of the initiating SP, signed by its operator private key.
 	SetAskSignature([]byte)
-	// GetApprovedSpOperatorAddress returns the approved SP's operator address.
+	// GetApprovedSpOperatorAddress returns the operator address of the approved SP.
 	GetApprovedSpOperatorAddress() string
-	// SetApprovedSpOperatorAddress sets the approved SP's operator address.
+	// SetApprovedSpOperatorAddress sets the operator address of the approved SP.
 	SetApprovedSpOperatorAddress(string)
-	// GetApprovedSignature returns the approved SP's signature.
+	// GetApprovedSignature returns the signature of the approved SP.
 	GetApprovedSignature() []byte
-	// SetApprovedSignature sets the approved SP's signature.
+	// SetApprovedSignature sets the signature of the approved SP.
 	SetApprovedSignature([]byte)
-	// GetApprovedSpEndpoint returns the approved SP's endpoint. It is used to replicate
+	// GetApprovedSpEndpoint returns the endpoint of the approved SP. It is used to replicate
 	// pieces to secondary SP.
 	GetApprovedSpEndpoint() string
-	// SetApprovedSpEndpoint sets the approved SP's endpoint.
+	// SetApprovedSpEndpoint sets the endpoint of the approved SP.
 	SetApprovedSpEndpoint(string)
-	// GetApprovedSpApprovalAddress returns the approved SP's approval address. It is
+	// GetApprovedSpApprovalAddress returns the approval address of the approved SP. It is
 	// used to seal object on greenfield.
 	GetApprovedSpApprovalAddress() string
-	// SetApprovedSpApprovalAddress sets the approved SP's approval address.
+	// SetApprovedSpApprovalAddress sets the approval address of the approved SP.
 	SetApprovedSpApprovalAddress(string)
 	// GetSignBytes returns the bytes from the task for initiated and approved SPs
 	// to sign.
 	GetSignBytes() []byte
 }
 
-// The ObjectTask associated with an object and storage params, and records the
+// ObjectTask is associated with an object and storage params, and records the
 // information of different stages of the object. Considering the change of storage
 // params on the greenfield, the storage params of each object should be determined
 // when it is created, and it should not be queried during the task flow, which is
@@ -260,12 +194,11 @@ type ObjectTask interface {
 	GetStorageParams() *storagetypes.Params
 	// SetStorageParams sets the storage params.Should try to avoid calling this
 	// method, it will change the task base information.
-	// Example:
-	// it will change resource estimate for UploadObjectTask and so on.
+	// For example: it will change resource estimate for UploadObjectTask and so on.
 	SetStorageParams(*storagetypes.Params)
 }
 
-// The UploadObjectTask is the interface to record the information for uploading object
+// UploadObjectTask is an abstract interface to record the information for uploading object
 // payload data to the primary SP.
 type UploadObjectTask interface {
 	ObjectTask
@@ -273,7 +206,7 @@ type UploadObjectTask interface {
 	InitUploadObjectTask(object *storagetypes.ObjectInfo, params *storagetypes.Params, timeout int64)
 }
 
-// The ReplicatePieceTask is the interface to record the information for replicating
+// ReplicatePieceTask is an abstract interface to record the information for replicating
 // pieces of object pieces data to secondary SPs.
type ReplicatePieceTask interface {
 	ObjectTask
@@ -300,7 +233,7 @@ type ReplicatePieceTask interface {
 	SetSecondarySignatures([][]byte)
 }
 
-// The ReceivePieceTask is the interface to record the information for receiving pieces
+// ReceivePieceTask is an abstract interface to record the information for receiving pieces
 // of object payload data from primary SP, it exists only in secondary SP.
 type ReceivePieceTask interface {
 	ObjectTask
@@ -326,11 +259,11 @@ type ReceivePieceTask interface {
 	GetPieceChecksum() []byte
 	// SetPieceChecksum set the checksum of received piece data.
 	SetPieceChecksum([]byte)
-	// GetSignature returns the primary SP's signature, because the InitReceivePieceTask
-	// will be transfer to secondary SP's, It is necessary to prove that the task was
+	// GetSignature returns the signature of the primary SP, because the InitReceivePieceTask
+	// will be transferred to the secondary SP, it is necessary to prove that the task was
 	// sent by a legitimate SP.
 	GetSignature() []byte
-	// SetSignature sets the primary SP's signature.
+	// SetSignature sets the signature of the primary SP.
 	SetSignature([]byte)
 	// GetSignBytes returns the bytes from the task for primary SP to sign.
 	GetSignBytes() []byte
@@ -342,8 +275,7 @@ type ReceivePieceTask interface {
 	SetSealed(bool)
 }
 
-// The SealObjectTask is the interface to record the information for sealing object to
-// the greenfield.
+// SealObjectTask is an abstract interface to record the information for sealing the object on the Greenfield chain.
 type SealObjectTask interface {
 	ObjectTask
 	// InitSealObjectTask inits the SealObjectTask.
@@ -356,7 +288,7 @@ type SealObjectTask interface {
 	GetSecondarySignatures() [][]byte
 }
 
-// The DownloadObjectTask is the interface to record the information for downloading
+// DownloadObjectTask is an abstract interface to record the information for downloading
 // pieces of object payload data.
 type DownloadObjectTask interface {
 	ObjectTask
@@ -381,7 +313,7 @@ type DownloadObjectTask interface {
 	GetHigh() int64
 }
 
-// The DownloadPieceTask is the interface to record the information for downloading piece data.
+// DownloadPieceTask is an abstract interface to record the information for downloading piece data.
 type DownloadPieceTask interface {
 	ObjectTask
 	// InitDownloadPieceTask inits DownloadPieceTask.
@@ -412,7 +344,7 @@ type DownloadPieceTask interface {
 	GetPieceLength() uint64
 }
 
-// ChallengePieceTask is the interface to record the information for get challenge
+// ChallengePieceTask is an abstract interface to record the information for get challenge
 // piece info, the validator get challenge info to confirm whether the sp stores
 // the user's data correctly.
 type ChallengePieceTask interface {
@@ -451,14 +383,14 @@ type ChallengePieceTask interface {
 	SetPieceDataSize(int64)
 }
 
-// The GCTask is the interface to record the information of garbage collection.
+// GCTask is an abstract interface to record the information of garbage collection.
 type GCTask interface {
 	Task
 }
 
-// The GCObjectTask is the interface to record the information for collecting the
+// GCObjectTask is an abstract interface to record the information for collecting the
 // piece store space by deleting object payload data that the object has been deleted
-// on the Greenfield chain.
+// on the Greenfield chain.
 type GCObjectTask interface {
 	GCTask
 	// InitGCObjectTask inits InitGCObjectTask.
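To make the `Key()` contract described in the Task interface comments above concrete, here is a small, hypothetical sketch of building task keys from a per-type prefix plus the task's own identity; the prefix scheme and helper names are illustrative assumptions, not part of the GfSp API:

```go
package main

import "fmt"

// TKey mirrors the task key type used by the Task interface.
type TKey string

// Each helper combines a task-type prefix with the task's identity fields,
// so keys stay unique across task types and across tasks of the same type.
func uploadObjectTaskKey(objectID uint64) TKey {
	return TKey(fmt.Sprintf("UploadObject-object_id:%d", objectID))
}

func gcObjectTaskKey(startBlockHeight, endBlockHeight uint64) TKey {
	return TKey(fmt.Sprintf("GCObject-start:%d-end:%d", startBlockHeight, endBlockHeight))
}

func main() {
	fmt.Println(uploadObjectTaskKey(10086)) // UploadObject-object_id:10086
	fmt.Println(gcObjectTaskKey(100, 200))  // GCObject-start:100-end:200
}
```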
@@ -487,7 +419,7 @@ type GCObjectTask interface {
 	SetGCObjectProgress(uint64, uint64)
 }
 
-// The GCZombiePieceTask is the interface to record the information for collecting
+// GCZombiePieceTask is an abstract interface to record the information for collecting
 // the piece store space by deleting zombie pieces data that dues to any exception,
 // the piece data meta is not on chain but the pieces has been store in piece store.
 type GCZombiePieceTask interface {
@@ -500,7 +432,7 @@ type GCZombiePieceTask interface {
 	SetGCZombiePieceStatus(uint64, uint64)
 }
 
-// The GCMetaTask is the interface to record the information for collecting the SP
+// GCMetaTask is an abstract interface to record the information for collecting the SP
 // meta store space by deleting the expired data.
 type GCMetaTask interface {
 	GCTask
diff --git a/core/taskqueue/README.md b/core/taskqueue/README.md
index f7ad1353d..bcebebaf4 100644
--- a/core/taskqueue/README.md
+++ b/core/taskqueue/README.md
@@ -1,48 +1,40 @@
 # Task Queue
 
-Task is the interface to the smallest unit of SP background service interaction.
-Task scheduling and execution are directly related to the order of task arrival,
-so task queue is a relatively important basic interface used by all modules inside
-SP.
+Task is the interface to the smallest unit of SP background service interaction. Task scheduling and execution are directly
+related to the order of task arrival, so task queue is a relatively important basic interface used by all modules inside SP.
 
 # Concept
 
 ## Task Queue With Limit
 
-Task execution needs to consume certain resources. Different task types have large
-differences in Memory, Bandwidth, and CPU consumption. The available resources of the
-nodes executing the task are uneven. Therefore, resources need to be considered when
-scheduling tasks. The `Task Queue With Limit` is to consider resources.
+Task execution needs to consume certain resources. Different task types have large differences in Memory, Bandwidth, and
+CPU consumption. The available resources of the nodes executing the task are uneven. Therefore, resources need to be considered
+when scheduling tasks. The `Task Queue With Limit` takes these resources into account.
 
 ## Task Queue Strategy
 
-Conventional queues cannot fully meet the requirements of tasks. For example, the
-retired strategy of tasks inside the queue, when the conventional queue is full,
-it cannot be pushed any more, however, tasks that fail after retries may need to be
-retired. For different types of task retired and pick up, etc. the strategies are
-different, the `Task Queue Strategy` is an interface that supports custom strategies.
+Conventional queues cannot fully meet the requirements of tasks. For example, consider the retirement strategy for tasks inside
+the queue: when a conventional queue is full, nothing more can be pushed, yet tasks that fail after retries may need
+to be retired. Since the retirement and pick-up strategies differ across task types, the `Task Queue Strategy`
+is an interface that supports custom strategies.
 
 # Task Queue Types
 
 ## TQueue
 
-TQueue is the interface to task queue. The task queue is mainly used to maintain tasks
-are running. In addition to supporting conventional FIFO operations, task queue also
-has some customized operations for task. For example, Has, PopByKey.
+TQueue is the interface to task queue. The task queue is mainly used to maintain the tasks that are running. In addition to
+supporting conventional FIFO operations, task queue also has some customized operations for tasks, for example Has and PopByKey.
 
 ## TQueueWithLimit
 
-TQueueWithLimit is the interface task queue that takes resources into account. Only
-tasks with less than required resources can be popped out.
+TQueueWithLimit is the interface to the task queue that takes resources into account. Only tasks whose required resources are within the remaining limits can be popped out.
 
 ## TQueueOnStrategy
 
-TQueueOnStrategy is a combination of TQueue and TQueueStrategy, it is the interface to
-task queue and the queue supports customize strategies to filter task for popping and
-retiring task.
+TQueueOnStrategy is a combination of TQueue and TQueueStrategy, it is the interface to task queue and the queue supports
+customized strategies to filter tasks for popping and retiring.
 
 ## TQueueOnStrategyWithLimit
 
-TQueueOnStrategyWithLimit is a combination of TQueueWithLimit and TQueueStrategy,it is
-the interface to task queue that takes resources into account, and the queue supports
-customize strategies to filter task for popping and retiring task.
\ No newline at end of file
+TQueueOnStrategyWithLimit is a combination of TQueueWithLimit and TQueueStrategy, it is the interface to task queue that
+takes resources into account, and the queue supports customized strategies to filter tasks for popping and retiring.
diff --git a/docs/introduction/01-overview.md b/docs/introduction/01-overview.md
deleted file mode 100644
index 152aca159..000000000
--- a/docs/introduction/01-overview.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Overview
-
-## What is the Greenfield Storage Provider
-
-Storage Providers (abbreviated SP) are storage service infrastructure providers.
-They use Greenfield as the ledger and the single source of truth. Each SP can and
-will respond to users' requests to write (upload) and read (download) data, and
-serve as the gatekeeper for user rights and authentications.
-
-## Architecture
-
-
architecture.png
-
Storage Provider Architecture
- -- **Gateway** is the entry point of each SP. It parses requests from the client and dispatches them to special service. - -- **Uploader** receives the object's payload data, splits it into segments, and stores them in piece store. - -- **Downloader** handles the user's downloading request and gets object data from the piece store. - -- **Receiver** receives data pieces from Primary SP and stores them in the piece store when SP works as a secondary SP. - -- **Challenge** handles HA challenge requests and returns the challenged piece data and other pieces' hashes of the object. - -- **TaskNode** works as the execute unit, it watches tasks(the smallest unit of a job) and executes them. - -- **Manager** responsible for the service management of SP. - -- **Signer** signs the transaction messages to the Greenfield chain with the SP's private key. - -- **P2P** used to interact with the control flow of the payload data, eg: GetSecondaryApproval. - -- **Metadata** used to provide efficient query interface to achieve low latency and high-performance SP requirements. - -- **PieceStore** interacts with underlying storage vendors, eg. AWS S3, MinIO. - -- **SPDB** stores all the contexts of the background jobs and the metadata of the SP. - -- **BSDB** stores all the events' data from the greenfield chain and provides them to the metadata service of SP. \ No newline at end of file diff --git a/docs/introduction/overview.md b/docs/introduction/overview.md new file mode 100644 index 000000000..d0f61314f --- /dev/null +++ b/docs/introduction/overview.md @@ -0,0 +1,77 @@ +# Overview + +## What is the Greenfield Storage Provider + +Storage Provider (SP) is infrastructure provider for storage services. They work in synergy with Greenfield validators +to provide a complete storage service. Validators store metadata and financial ledgers with consensus, while SPs store +the actual data (payload data) of objects using the Greenfield chain as the ledger and single source of truth. SPs provide +a range of convenient services for users and dApps to manage data on Greenfield. + +## How the Greenfield Storage Providers works + +SPs need to register themselves firstly by depositing on the Greenfield blockchain as their "Service Stake". The Greenfield +validators will then conduct a governance procedure to vote to elect the SPs. When joining and leaving the network, SPs +must follow specific actions to ensure data redundancy for users, or they will face fines on their "Service Stake". + +SPs provide publicly accessible APIs that allow users to upload, download and manage data. These APIs are designed to be +similar to Amazon S3 APIs, making it easier for existing developers to write code for them. SPs are responsible for +responding to user requests to write (upload) and read (download) data, as well as managing user permissions and authentications. + +Each SP maintains its own local full node, allowing for a strong connection with the Greenfield network. This enables the +SP to directly monitor state changes, properly index data, send transaction requests in a timely manner and manage local data accurately. + +To encourage SPs to showcase their capabilities and provide a professional storage system with high-quality SLA, it is +recommended that they advertise their information and prove to the community. + +## Architecture + +
architecture.png
+
Storage Provider Architecture
+
+SP contains fifteen core modules as shown below:
+
+- **Gater**: It serves as the gateway for SP, providing HTTP services and adhering to the S3 protocol. It generates tasks corresponding to user requests and forwards them to other modules within SP. Since Gater does not allow customization, no interface is defined in the modular file.
+
+- **Authorizer**: It is responsible for verifying authorization.
+
+- **Approver**: It is responsible for handling approval requests, specifically `CreateBucketApproval` and `CreateObjectApproval`.
+
+- **Uploader**: It handles the put object requests from user accounts and stores payload data into the piece store of the primary SP.
+
+- **Downloader**: It is responsible for handling get object requests from user accounts and get challenge info requests from other components in the Greenfield system.
+
+- **TaskExecutor**: It is responsible for handling background tasks. This module can request tasks from the Manager module, execute them and report the results or status back to the Manager.
+
+- **Manager**: It is responsible for managing task scheduling of SP and other management functions.
+
+- **P2P**: It is responsible for handling the interaction of control information between SPs. It handles ask replicate piece approval requests by broadcasting the approval to other SPs, waiting for responses and returning the approvals if the minimum or maximum approved number is reached before the timeout.
+
+- **Receiver**: It receives data from the primary SP, calculates the integrity hash of the data, signs it, and returns it to the primary SP for sealing on Greenfield.
+
+- **Signer**: It handles the signing of SP data on the Greenfield chain with the operator key and holds all of the SP's private keys. Due to the sequence number of the SP account, it must be a singleton.
+
+- **Metadata**: It is used to provide an efficient query interface for meta info in SP. This module achieves low latency and high-performance SP requirements.
+
+- **BlockSyncer**: It records blocks from the Greenfield blockchain.
+
+- **PieceStore**: It interacts with underlying storage vendors, e.g. AWS S3, MinIO, etc.
+
+- **SPDB**: It stores all the contexts of the background jobs and the metadata of SP.
+
+- **BSDB**: It stores all the events' data from the Greenfield chain and provides them to the metadata service of SP.
+
+## How to implement customized requirements in Greenfield SP
+
+At the code level, SP is not only an implementation layer: it has been expanded into a framework called `GfSp`, which allows users to customize their own requirements according to their needs. If users want to implement some specific functions, they can override the methods declared in the abstract interfaces. If users don't need customized requirements, `GfSp` will use the default implementations. There are nine important layers of abstraction:
+
+- [lifecycle](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/core/lifecycle): It provides two abstract interfaces to manage the service lifecycle: registering services, starting services, listening to signals and shutting down gracefully.
+- [module](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/core/module): It provides multiple abstract interfaces to interact with different modules in GfSp. Therefore, users can implement the related methods to meet their own requirements.
+- [consensus](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/core/consensus): It provides abstract interfaces about how to query data on the Greenfield blockchain.
+- [piecestore](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/core/piecestore): It is used to interact with underlying storage systems.
+- [spdb](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/core/spdb): It provides abstract interfaces about how to store background tasks and metadata of SP.
+- [bsdb](https://github.com/bnb-chain/greenfield-storage-provider/blob/master/core/bsdb): It provides abstract interfaces about how to query metadata in SP.
+- [rcmgr](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/core/rcmgr): It provides abstract interfaces about managing resources.
+- [task](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/core/task): It provides abstract interfaces about the smallest unit for interacting with SP background services.
+- [taskqueue](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/core/taskqueue): It provides abstract interfaces about task scheduling and execution.
diff --git a/docs/modules/01-piece_store.md b/docs/modules/01-piece_store.md
deleted file mode 100644
index eddee5cbc..000000000
--- a/docs/modules/01-piece_store.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# PieceStore
-
-## Vision
-
-Write once, run on every storage service.
-
-## Goal
-
-- Vendor-agnostic
-- Production ready
-- High performance
-- High availability
-
-## Overview
-
-The core function of PieceStore module is to be compatible with multiple object storage or KV storage such as S3, GCS, Azure Blob, Minio, Ceph, IPFS, DiskFile, Memory, etc. Different SP can use different object storage without caring about the underlying implementation.
-
-
-
PieceStore Architecture
- -[PieceStore code](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/store/piecestore) - -### API Interfaces - -PieceStore provides encapsulating interfaces for upper-layer services to use. -Now these APIs are called by local pakcage functions. We provide four apis to visit PieceStore as follows: - -```go -func (p *PieceStore) Get(ctx context.Context, key string, offset, limit int64) (io.ReadCloser, error) - -// Put one piece to PieceStore -func (p *PieceStore) Put(ctx context.Context, key string, reader io.Reader) error - -// Delete one piece in PieceStore -func (p *PieceStore) Delete(ctx context.Context, key string) error - -// GetPieceInfo returns piece info in PieceStore -func (p *PieceStore) GetPieceInfo(ctx context.Context, key string) (storage.Object, error) -``` - -In the future, upper-layer services could visit PieceStore through HTTP, RPC or P2P which is more decentralized. - -### Sharding - -PieceStore provides sharding function for data high availability. PieceStore uses `fnv` algorithm to shard piece data. If users want to use data sharding, you can configure `Shards = a(a is a number which 2 <= a <= 256)` in config.toml. - -**Note** The current implementation of sharding can only be used for multiple buckets in one region. The support of multi-region would be added in the future which will be more higher availability. - -### Compatibile With Multi Object Storage - -PieceStore is vendor-agnostic, so it will be compatibile with multi object storage. Now SP supports based storage such as `S3, MinIO, DiskFile and Memory`. -Recommend using S3 or MinIO in production environment and [the releated config document is here](https://github.com/bnb-chain/greenfield-storage-provider/blob/master/store/piecestore/README.md). Users can experience PieceStore in local by DiskFile or Memory. - -The common interface is as follows: - -```go -// ObjectStorage is a common interface that must be implemented if some users want to use an object -// storage (such as S3, Azure Blob, Minio, OSS, COS, etc) -type ObjectStorage interface { - // String the description of an object storage - String() string - // CreateBucket create the bucket if not existed - CreateBucket(ctx context.Context) error - // GetObject gets data for the given object specified by key - GetObject(ctx context.Context, key string, offset, limit int64) (io.ReadCloser, error) - // PutObject puts data read from a reader to an object specified by key - PutObject(ctx context.Context, key string, reader io.Reader) error - // DeleteObject deletes an object - DeleteObject(ctx context.Context, key string) error - - // HeadBucket determines if a bucket exists and have permission to access it - HeadBucket(ctx context.Context) error - // HeadObject returns some information about the object or an error if not found - HeadObject(ctx context.Context, key string) (Object, error) - // ListObjects lists returns a list of objects - ListObjects(ctx context.Context, prefix, marker, delimiter string, limit int64) ([]Object, error) - // ListAllObjects returns all the objects as a channel - ListAllObjects(ctx context.Context, prefix, marker string) (<-chan Object, error) -} -``` - -### Outlook - -PieceStore provides some fundamental functions: wrapped API interfaces, sharding and compatibile with multi object storage. However, there are more functions to be added in the future. - -1. 
Data Cache - -PieceStore is combined with object storage, cache is an important component for interacting efficiently between the local client and remote services. Read and write data can be loaded into cache in advance or asynchronously. Using caching technology can significantly reduce the latency of storag operations and increase data throughput compared to interact with remote services directly. - -2. GC Module - -There will be some useless data stored in remote object storage which will occupy users' data space and generate extra costs. GC module will garbage useless data according to context. This module will reduce users' data space and costs. diff --git a/docs/modules/03-sp_db.md b/docs/modules/03-sp_db.md deleted file mode 100644 index 5680fd498..000000000 --- a/docs/modules/03-sp_db.md +++ /dev/null @@ -1,174 +0,0 @@ -# SP DB(Storage Provider Database) - -SP store needs to implement [SPDB](../../store/sqldb/database.go) interface. SQL database(MySQL) is used by default. -The following mainly introduces the data schemas corresponding to several core interfaces. - -## JobContext - -JobContext records the context of uploading an payload data, it contains two tables: JobTable and ObjectTable. - -### Job Table - -JobTable describes some important data about job type and job state. Every operation in SP is a job which drives by state machine. - -Below is the schema of `Jobtable`: - -```go -// JobTable table schema -type JobTable struct { - // JobID defines the unique id of a job - JobID uint64 `gorm:"primary_key;autoIncrement"` - // JobType defines the type of a job - JobType int32 - // JobState defines the state of a job - JobState int32 - // JobErrorCode defines the error code when a job abnormal termination - JobErrorCode uint32 - // CreatedTime defines the job create time, used to jobs garbage collection - CreatedTime time.Time - // ModifiedTime defines the job last modified time, used to judge timeout - ModifiedTime time.Time -} -``` - -Below is the enum of `Jobtype and JobState`: - -```protobuf -enum JobType { - // default job type - JOB_TYPE_UNSPECIFIED = 0; - // upload object - JOB_TYPE_UPLOAD_OBJECT = 1; - // delete object - JOB_TYPE_DELETE_OBJECT = 2; -} - -enum JobState { - // default job state - JOB_STATE_INIT_UNSPECIFIED = 0; - - // uploading payload data to primary SP - JOB_STATE_UPLOAD_OBJECT_DOING = 1; - // upload payload data to primary SP has done - JOB_STATE_UPLOAD_OBJECT_DONE = 2; - // failed to upload primary SP - JOB_STATE_UPLOAD_OBJECT_ERROR = 3; - - // alloc secondary SPs is doing - JOB_STATE_ALLOC_SECONDARY_DOING = 4; - // alloc secondary SPs has done - JOB_STATE_ALLOC_SECONDARY_DONE = 5; - // failed to alloc secondary SPs - JOB_STATE_ALLOC_SECONDARY_ERROR = 6; - - // replicating payload data to secondary SPs - JOB_STATE_REPLICATE_OBJECT_DOING = 7; - // replicate payload data to secondary SPs has done - JOB_STATE_REPLICATE_OBJECT_DONE = 8; - // failed to replicate payload data to secondary SPs - JOB_STATE_REPLICATE_OBJECT_ERROR = 9; - - // signing seal object transaction - JOB_STATE_SIGN_OBJECT_DOING = 10; - // sign seal object transaction has done - JOB_STATE_SIGN_OBJECT_DONE = 11; - // failed to sign seal object transaction - JOB_STATE_SIGN_OBJECT_ERROR = 12; - - // seal object transaction is doing on chain - JOB_STATE_SEAL_OBJECT_DOING = 13; - // seal object transaction has done - JOB_STATE_SEAL_OBJECT_DONE = 14; - // failed to run seal object transaction - JOB_STATE_SEAL_OBJECT_ERROR = 15; -} -``` - -### Object Table - -ObjectTable stores basic information 
about an upload object metadata. - -Below is the schema of `ObjectTable`: - -```go -// ObjectTable table schema -type ObjectTable struct { - // ObjectID defines the unique ID of an obejct - ObjectID uint64 `gorm:"primary_key"` - // JobID defines the unique id of a job. - JobID uint64 `gorm:"index:job_to_object"` - // Owner defines the owner of an object - Owner string - // BucketName deinfes the bucket name to which an object belongs - BucketName string - // ObjectName defines the object name - ObjectName string - // PayloadSize defines the obejct size - PayloadSize uint64 - // IsPublic defines an object is public - IsPublic bool - // ContentType defines an obejct content type - ContentType string - // CreatedAtHeight defines an obejct created at which chain height - CreatedAtHeight int64 - // ObjectStatus defines object status - ObjectStatus int32 - // RedundancyType defines the redundancy type of an object used - RedundancyType int32 - // SourceType defines the source type of an object - SourceType int32 - // SpIntegrityHash defines sp inetgirty hash - SpIntegrityHash string - // SecondarySpAddresses defines secondary sp addresses - SecondarySpAddresses string -} -``` - -Below is the enum of `RedundancyType, ObjectStatus and SourceType`: - -```protobuf -enum RedundancyType { - // default redundancy type is replica - REDUNDANCY_REPLICA_TYPE = 0; - // redundancy type is ec - REDUNDANCY_EC_TYPE = 1; - // redundancy type is inline type - REDUNDANCY_INLINE_TYPE = 2; -} - -enum ObjectStatus { - // default object status is initialized - OBJECT_STATUS_INIT = 0; - // object status is in service - OBJECT_STATUS_IN_SERVICE = 1; -} - -enum SourceType { - // default source type that object is origin - SOURCE_TYPE_ORIGIN = 0; - // object is from bsc cross chain - SOURCE_TYPE_BSC_CROSS_CHAIN = 1; -} -``` - -## Integrity - -For each object there are some pieces root hashes stored on greenfield chain to keep data integrity. And for the pieces of an object stored on a specific SP, the SP keeps these pieces' hashes, which are used for storage proof. - -### Integrity Table - -Below is the schema of `IntegrityMetaTable`: - -```go -// IntegrityMetaTable table schema -type IntegrityMetaTable struct { - // ObjectID defines the unique ID of an obejct - ObjectID uint64 `gorm:"primary_key"` - // PieceHashList defines the piece hash list of an obejct by using sha256 - PieceHashList string - // IntegrityHash defines the integrity hash of an object - IntegrityHash string - // Signature defines the signature of an obejct's IntegrityHash by using Secondary SP's private key - Signature string -} -``` diff --git a/docs/modules/04-bs_db.md b/docs/modules/04-bs_db.md deleted file mode 100644 index 21d402fce..000000000 --- a/docs/modules/04-bs_db.md +++ /dev/null @@ -1,198 +0,0 @@ -# BS DB(Block Syncer Database) - -BS (Block Syncer Database) store needs to implement [BSDB](../../store/bsdb/database.go) interface. SQL database is used by default. -The following mainly introduces the data schemas corresponding to several core interfaces. - -```go -// BSDB contains all the methods required by block syncer database -type BSDB interface { - Metadata -} -``` - -## Block Syncer - -Block Syncer retrieves the historical data on the chain, optimizes the storage, and re-structures it according to the different event data types. - -### Object Table - -An object represents a fundamental unit of storage in Greenfield, BSDB object table consists of only associated metadata synced from chain side. 
- -Below is the schema of `Object`: - -```go -// Object is the structure for user object -type Object struct { - // ID defines db auto_increment id of object - ID uint64 `gorm:"id"` - // Creator defines the account address of object creator - Creator common.Address `gorm:"creator_address"` - // Owner defines the account address of object owner - Owner common.Address `gorm:"column:owner_address"` - // BucketName is the name of the bucket - BucketName string `gorm:"bucket_name"` - // ObjectName is the name of object - ObjectName string `gorm:"object_name"` - // ObjectID is the unique identifier of object - ObjectID common.Hash `gorm:"object_id"` - // BucketID is the unique identifier of bucket - BucketID common.Hash `gorm:"bucket_id"` - // PayloadSize is the total size of the object payload - PayloadSize uint64 `gorm:"payload_size"` - // Visibility defines the highest permissions for bucket. When a bucket is public, everyone can get storage obj - Visibility string `gorm:"visibility"` - // ContentType defines the format of the object which should be a standard MIME type - ContentType string `gorm:"content_type"` - // CreateAt defines the block number when the object created - CreateAt int64 `gorm:"create_at"` - // CreateTime defines the timestamp when the object created - CreateTime int64 `gorm:"create_time"` - // ObjectStatus defines the upload status of the object. - ObjectStatus string `gorm:"column:status"` - // RedundancyType defines the type of the redundancy which can be multi-replication or EC - RedundancyType string `gorm:"redundancy_type"` - // SourceType defines the source of the object. - SourceType string `gorm:"source_type"` - // CheckSums defines the root hash of the pieces which stored in an SP - Checksums pq.ByteaArray `gorm:"check_sums;type:text"` - // SecondarySpAddresses defines the addresses of secondary_sps - SecondarySpAddresses pq.StringArray `gorm:"secondary_sp_addresses;type:text"` - // LockedBalance defines locked balance of object - LockedBalance common.Hash `gorm:"locked_balance"` - // Removed defines the object is deleted or not - Removed bool `gorm:"removed"` - // UpdateTime defines the time when the object updated - UpdateTime int64 `gorm:"update_time"` - // UpdateAt defines the block number when the object updated - UpdateAt int64 `gorm:"update_at"` -} -``` - -Below is the enum of `Visibility, RedundancyType, ObjectStatus and SourceType`: - -```protobuf -// VisibilityType is the resources public status. -enum VisibilityType { - VISIBILITY_TYPE_UNSPECIFIED = 0; - VISIBILITY_TYPE_PUBLIC_READ = 1; - VISIBILITY_TYPE_PRIVATE = 2; - // If the bucket Visibility is inherit, it's finally set to private. If the object Visibility is inherit, it's the same as bucket. - VISIBILITY_TYPE_INHERIT = 3; -} -// RedundancyType represents the redundancy algorithm type for object data, -// which can be either multi-replica or erasure coding. -enum RedundancyType { - REDUNDANCY_EC_TYPE = 0; - REDUNDANCY_REPLICA_TYPE = 1; -} -// ObjectStatus represents the creation status of an object. After a user successfully -// sends a CreateObject transaction onto the chain, the status is set to 'Created'. -// After the Primary Service Provider successfully sends a Seal Object transaction onto -// the chain, the status is set to 'Sealed'. 
-enum ObjectStatus { - OBJECT_STATUS_CREATED = 0; - OBJECT_STATUS_SEALED = 1; -} -// SourceType represents the source of resource creation, which can -// from Greenfield native or from a cross-chain transfer from BSC -enum SourceType { - SOURCE_TYPE_ORIGIN = 0; - SOURCE_TYPE_BSC_CROSS_CHAIN = 1; - SOURCE_TYPE_MIRROR_PENDING = 2; -} -``` - -### Bucket Table - -A bucket serves as a logical container for storing objects in Greenfield. -The Bucket table describes the db structure of bucket, and it provides additional storage information compared to the schema on the chain side. e.g. Removed defines the bucket is deleted or not - -Below is the schema of `Bucket`: - -```go -// Bucket is the structure for user bucket -type Bucket struct { - // ID defines db auto_increment id of bucket - ID uint64 `gorm:"id"` - // Owner is the account address of bucket creator, it is also the bucket owner. - Owner common.Address `gorm:"column:owner_address"` - // BucketName is a globally unique name of bucket - BucketName string `gorm:"bucket_name"` - // Visibility defines the highest permissions for bucket. When a bucket is public, everyone can get storage obj - Visibility string `gorm:"visibility"` - // ID is the unique identification for bucket. - BucketID common.Hash `gorm:"bucket_id"` - // SourceType defines which chain the user should send the bucket management transactions to - SourceType string `gorm:"source_type"` - // CreateAt defines the block number when the bucket created. - CreateAt int64 `gorm:"create_at"` - // CreateTime defines the timestamp when the bucket created - CreateTime int64 `gorm:"create_time"` - // PaymentAddress is the address of the payment account - PaymentAddress common.Address `gorm:"payment_address"` - // PrimarySpAddress is the address of the primary sp. Objects belong to this bucket will never - // leave this SP, unless you explicitly shift them to another SP. - PrimarySpAddress common.Address `gorm:"primary_sp_address"` - // ReadQuota defines the traffic quota for read - ChargedReadQuota uint64 `gorm:"charged_read_quota"` - // PaymentPriceTime defines price time of payment - PaymentPriceTime int64 `gorm:"payment_price_time"` - // Removed defines the bucket is deleted or not - Removed bool `gorm:"removed"` -} -``` - -the enum of `Visibility and SourceType` are the same as above - -### Epoch Table - -The Epoch table describes the latest progress of block event information. - -Below is the schema of `Epoch`: - -```go -// Epoch stores current information of the latest block -type Epoch struct { - // OneRowID defines if the table only has one row - OneRowID bool `gorm:"one_row_id;not null;default:true;primaryKey"` - // BlockHeight defines the latest block number - BlockHeight int64 `gorm:"block_height;type:bigint(64)"` - // BlockHash defines the latest block hash - BlockHash common.Hash `gorm:"block_hash;type:BINARY(32)"` - // UpdateTime defines the update time of the latest block - UpdateTime int64 `gorm:"update_time;type:bigint(64)"` -} -``` - -### StreamRecord Table - -The StreamRecord table describes the stream payment record of a stream account. 
Below is the schema of `StreamRecord`:
-
-```go
-type StreamRecord struct {
-	// ID defines db auto_increment id of stream record
-	ID uint64 `gorm:"id"`
-	// Account defines the account address
-	Account common.Address `gorm:"account"`
-	// CrudTimestamp defines the latest update timestamp of the stream record
-	CrudTimestamp int64 `gorm:"crud_timestamp"`
-	// NetflowRate defines the per-second rate that an account's balance is changing.
-	// It is the sum of the account's inbound and outbound flow rates.
-	NetflowRate *common.Big `gorm:"netflow_rate"`
-	// StaticBalance defines the balance of the stream account at the latest CRUD timestamp.
-	StaticBalance *common.Big `gorm:"static_balance"`
-	// BufferBalance defines reserved balance of the stream account
-	// If the netflow rate is negative, the reserved balance is `netflow_rate * reserve_time`
-	BufferBalance *common.Big `gorm:"buffer_balance"`
-	// LockBalance defines the locked balance of the stream account after it puts a new object and before the object is sealed
-	LockBalance *common.Big `gorm:"lock_balance"`
-	// Status defines the status of the stream account
-	Status string `gorm:"status"`
-	// SettleTimestamp defines the unix timestamp when the stream account will be settled
-	SettleTimestamp int64 `gorm:"column:settle_timestamp"`
-	// OutFlows defines the accumulated outflow rates of the stream account
-	OutFlows []byte `gorm:"out_flows;type:longblob"`
-}
-```
\ No newline at end of file
diff --git a/docs/modules/approver.md b/docs/modules/approver.md
new file mode 100644
index 000000000..6939dcfc1
--- /dev/null
+++ b/docs/modules/approver.md
@@ -0,0 +1,57 @@
+# Approver
+
+Approver module is used to handle approval requests including `CreateBucketApproval` and `CreateObjectApproval`. For the workflow of Approver, users can refer to [GetApproval](../workflow/workflow.md#get-approval). We currently abstract SP as the GfSp framework, which provides users with customizable capabilities to meet their specific requirements. Approver module provides an abstract interface, which is called `Approver`, as follows:
+
+Approver is an abstract interface to handle ask approval requests.
+
+```go
+type Approver interface {
+	Modular
+	// PreCreateBucketApproval prepares to handle CreateBucketApproval, it can do some checks such as checking for duplicates or whether the limitation of SP has been reached.
+	PreCreateBucketApproval(ctx context.Context, task task.ApprovalCreateBucketTask) error
+	// HandleCreateBucketApprovalTask handles the CreateBucketApproval, it can set expired height, sign the MsgCreateBucket and so on.
+	HandleCreateBucketApprovalTask(ctx context.Context, task task.ApprovalCreateBucketTask) (bool, error)
+	// PostCreateBucketApproval is called after HandleCreateBucketApprovalTask, it can recycle resources, make statistics and do some other operations.
+	PostCreateBucketApproval(ctx context.Context, task task.ApprovalCreateBucketTask)
+
+	// PreCreateObjectApproval prepares to handle CreateObjectApproval, it can do some checks such as checking for duplicates or whether the limitation of SP has been reached.
+	PreCreateObjectApproval(ctx context.Context, task task.ApprovalCreateObjectTask) error
+	// HandleCreateObjectApprovalTask handles the CreateObjectApproval, it can set expired height, sign the MsgCreateObject and so on.
+	HandleCreateObjectApprovalTask(ctx context.Context, task task.ApprovalCreateObjectTask) (bool, error)
+	// PostCreateObjectApproval is called after HandleCreateObjectApprovalTask, it can recycle resources, make statistics and do some other operations.
+	PostCreateObjectApproval(ctx context.Context, task task.ApprovalCreateObjectTask)
+	// QueryTasks queries the tasks that are running on Approver by task sub-key.
+	QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
+}
+```
+
+Approver interface inherits [Modular interface](./common/lifecycle_modular.md#modular-interface), so Approver module can be managed by lifecycle and resource manager.
+
+In terms of the functions provided by Approver module, it can be divided into two parts: CreateBucketApproval and CreateObjectApproval. Both CreateBucketApproval and CreateObjectApproval have three methods: PreXXX, HandleXXX and PostXXX. Therefore, you can rewrite these methods to meet your own requirements.
+
+As we can see from the second parameter of the methods defined in the `Approver` interface, bucketApproval is split into `ApprovalCreateBucketTask` and objectApproval is split into `ApprovalCreateObjectTask`. They are also defined as interfaces.
+
+We can query the ApprovalCreateBucket and ApprovalCreateObject tasks that we care about by the `QueryTasks` method using a subKey.
+
+## ApprovalCreateBucketTask and ApprovalCreateObjectTask
+
+ApprovalTask is used to record approval information for users creating buckets and objects. Primary SP approval is required before serving the bucket and object. If the SP approves the message, it will sign the approval message. The greenfield will verify the signature of the approval message to determine whether the SP accepts the bucket and object. ApprovalTask includes `ApprovalCreateBucketTask` and `ApprovalCreateObjectTask`.
+
+The corresponding interface definitions are shown below:
+
+- [ApprovalTask](./common/task.md#approvaltask)
+- [ApprovalCreateBucketTask](./common/task.md#approvalcreatebuckettask)
+- [ApprovalCreateObjectTask](./common/task.md#approvalcreateobjecttask)
+
+ApprovalTask interface inherits [Task interface](./common/task.md#task), which describes what operations a Task has. You can overwrite all these methods in your own implementation.
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpCreateBucketApprovalTask](./common/proto.md#gfspcreatebucketapprovaltask-proto)
+- [GfSpCreateObjectApprovalTask](./common/proto.md#gfspcreateobjectapprovaltask-proto)
+- [MsgCreateBucket](./common/proto.md#msgcreatebucket-proto)
+- [MsgCreateObject](./common/proto.md#msgcreateobject-proto)
+
+## GfSp Framework Approver Code
+
+Approver module code implementation: [Approver](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/modular/approver)
diff --git a/docs/modules/authenticator.md b/docs/modules/authenticator.md
new file mode 100644
index 000000000..e4b01fbc4
--- /dev/null
+++ b/docs/modules/authenticator.md
@@ -0,0 +1,207 @@
+# Authenticator
+
+Authenticator module is used to verify user authentication. Each request arriving at the SP gateway requires authentication. SP uses authentication to know who you are.
+
+We currently abstract SP as the GfSp framework, which provides users with customizable capabilities to meet their specific requirements. Authenticator module provides an abstract interface, which is called `Authenticator`, as follows:
+
+```go
+// Authenticator is an abstract interface to verify user authentication.
+type Authenticator interface {
+	Modular
+	// VerifyAuthentication verifies the operator authentication.
+	VerifyAuthentication(ctx context.Context, auth AuthOpType, account, bucket, object string) (bool, error)
+	// GetAuthNonce gets the auth nonce for which the dApp or client can generate EDDSA key pairs.
+	GetAuthNonce(ctx context.Context, account string, domain string) (*spdb.OffChainAuthKey, error)
+	// UpdateUserPublicKey updates the user public key once the dApp or client generates the EDDSA key pairs.
+	UpdateUserPublicKey(ctx context.Context, account string, domain string, currentNonce int32, nonce int32,
+		userPublicKey string, expiryDate int64) (bool, error)
+	// VerifyOffChainSignature verifies the signature signed by user's EDDSA private key.
+	VerifyOffChainSignature(ctx context.Context, account string, domain string, offChainSig string, realMsgToSign string) (bool, error)
+}
+
+// AuthOpType defines the operator type used to verify authentication.
+type AuthOpType int32
+
+const (
+	// AuthOpTypeUnKnown defines the default value of AuthOpType
+	AuthOpTypeUnKnown AuthOpType = iota
+	// AuthOpAskCreateBucketApproval defines the AskCreateBucketApproval operator
+	AuthOpAskCreateBucketApproval
+	// AuthOpAskCreateObjectApproval defines the AskCreateObjectApproval operator
+	AuthOpAskCreateObjectApproval
+	// AuthOpTypeGetChallengePieceInfo defines the GetChallengePieceInfo operator
+	AuthOpTypeGetChallengePieceInfo
+	// AuthOpTypePutObject defines the PutObject operator
+	AuthOpTypePutObject
+	// AuthOpTypeGetObject defines the GetObject operator
+	AuthOpTypeGetObject
+	// AuthOpTypeGetUploadingState defines the GetUploadingState operator
+	AuthOpTypeGetUploadingState
+	// AuthOpTypeGetBucketQuota defines the GetBucketQuota operator
+	AuthOpTypeGetBucketQuota
+	// AuthOpTypeListBucketReadRecord defines the ListBucketReadRecord operator
+	AuthOpTypeListBucketReadRecord
+)
+```
+
+Authenticator interface inherits [Modular interface](./lifecycle_modular.md#modular-interface), so Authenticator module can be managed by lifecycle and resource manager.
+
+You can overwrite `VerifyAuthentication` to implement your own authentication mode for different AuthOpType values. This is the most basic authentication.
+
+## Off-Chain Authentication
+
+### Abstract
+
+This document outlines an off-chain authentication specification for greenfield storage providers (SPs) and clients. The specification includes a full functional workflow and a reference implementation, making it easy for any application integrating with greenfield SPs to build an off-chain authentication mechanism.
+
+### Motivation
+
+Applications based on the greenfield chain often need to interact with multiple greenfield SPs, which are off-chain services that require users to use Ethereum-compatible accounts to represent their identities.
+
+For most interactions between applications and SPs, users' identities are required. Typically, applications can use message signing via account private keys to authenticate users, as long as they have access to their private keys. However, for browser-based applications, accessing the end users' private keys directly is not possible, making it necessary to prompt users to sign messages for each off-chain request between applications and SPs. This results in a poor user experience.
+
+This document describes a workflow to address this problem.
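+
+Before walking through the off-chain workflow, here is a minimal, hypothetical sketch of how a gateway handler might consult the `Authenticator` interface above before serving a download. The handler name, the stub implementation and the error message are illustrative assumptions, not the GfSp implementation; the interface shape and the `AuthOpTypeGetObject` constant are taken from the definitions above.
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+	"fmt"
+)
+
+// Minimal redeclarations so the sketch is self-contained; in GfSp these come
+// from the module package shown above.
+type AuthOpType int32
+
+const AuthOpTypeGetObject AuthOpType = 5 // matches the iota order above
+
+type Authenticator interface {
+	VerifyAuthentication(ctx context.Context, auth AuthOpType, account, bucket, object string) (bool, error)
+}
+
+// serveObject sketches gating a GetObject request on VerifyAuthentication.
+func serveObject(ctx context.Context, auth Authenticator, account, bucket, object string) error {
+	allowed, err := auth.VerifyAuthentication(ctx, AuthOpTypeGetObject, account, bucket, object)
+	if err != nil {
+		return err // backend failure, not a permission decision
+	}
+	if !allowed {
+		return errors.New("permission denied") // illustrative error
+	}
+	// ... read pieces from the piece store and stream the payload here
+	return nil
+}
+
+// allowAll is a stub Authenticator that approves everything (demo only).
+type allowAll struct{}
+
+func (allowAll) VerifyAuthentication(ctx context.Context, op AuthOpType, account, bucket, object string) (bool, error) {
+	return true, nil
+}
+
+func main() {
+	err := serveObject(context.Background(), allowAll{}, "0xUser", "mybucket", "myobject")
+	fmt.Println("serve result:", err) // serve result: <nil>
+}
+```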
+### WorkFlow
+
+#### Overall workflow
+
+![](../../../asset/015-Auth-Overview.png)
+
+#### Step 1 - Generate EdDSA key pairs in Apps
+
+Applications can design how to generate EdDSA key pairs themselves, and SPs do not have any restrictions on it.
+
+Here is one example.
+
+1. For each Ethereum address, the SP counts how many times the account public key has been updated for a given app domain since registration. This value is called the "key nonce" and denoted as n. It starts from 0 and increments by 1 after each successful account public key update. The app can invoke the [SP API "request_nonce"]() to retrieve the nonce value n. A simple request to get the nonce is:
+
+```sh
+curl --location 'https://${SP_API_ADDRESS}/auth/request_nonce' \
+--header 'X-Gnfd-User-Address: 0x3d0a49B091ABF8940AD742c0139416cEB30CdEe0' \
+--header 'X-Gnfd-App-Domain: https://greenfield_app1.domain.com'
+```
+
+The response is:
+
+```json
+{
+  "current_nonce": 0,
+  "next_nonce": 1,
+  "current_public_key": "",
+  "expiry_date": ""
+}
+```
+
+Since we are trying to build a new key pair, we will use the next_nonce value as n.
+
+2. The app puts `n` and `sp addresses` into a constant string:
+
+```plain
+Sign this message to let dapp ${dapp_domain} access the following SPs:
+- SP ${SP_ADDRESS_1} (name:${SP_NAME_1}) with nonce:${NONCE_1}
+- SP ${SP_ADDRESS_2} (name:${SP_NAME_2}) with nonce:${NONCE_2}
+...
+```
+
+We denote the new string as `M`.
+
+3. The app then requests the user to sign M with their Ethereum ECDSA private key, then gets the signature S.
+4. The app uses sha256(S) as the seed to generate the EdDSA key pairs EdDSA_private_K and EdDSA_public_K.
+5. The app saves EdDSA_private_K as plain text in the browser's session storage and registers EdDSA_public_K as the account public key into the SP servers.
+
+#### Step 2 - Register EdDSA public key in SPs
+
+For each combination of user address and app domain, the SP backend maintains a key nonce `n`. It starts from 0 and increments by 1 after each successful account key update.
+
+To register an account public key into a certain SP, you can invoke [SP API "update\_key"](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/auth/update_key.html).
+
+Here is an example. Suppose that:
+
+1. The **user account address** is `0x3d0a49B091ABF8940AD742c0139416cEB30CdEe0`
+2. The **app domain** is `https://greenfield_app1.domain.com`
+3. The **nonce** for the above user address and app domain from [SP API "request\_nonce"](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/auth/get_nonce.html) is `1`
+4. The **SP operator address** is `0x70d1983A9A76C8d5d80c4cC13A801dc570890819`
+5. The **EdDSA\_public\_K** is `4db642fe6bc2ceda2e002feb8d78dfbcb2879d8fe28e84e02b7a940bc0440083`
+6. The **expiry time** for this `EdDSA_public_K` is `2023-04-28T16:25:24Z`.
The expiry time indicates the expiry time of this `EdDSA_public_K` , which should be a future time and within **7 days.** + +The app will put above information into a text message: + +```plain +https://greenfield_app1.domain.com wants you to sign in with your BNB Greenfield account:\n0x3d0a49B091ABF8940AD742c0139416cEB30CdEe0\n\nRegister your identity public key 4db642fe6bc2ceda2e002feb8d78dfbcb2879d8fe28e84e02b7a940bc0440083\n\nURI: https://greenfield_app1.domain.com\nVersion: 1\nChain ID: 5600\nIssued At: 2023-04-24T16:25:24Z\nExpiration Time: 2023-04-28T16:25:24Z\nResources:\n- SP 0x70d1983A9A76C8d5d80c4cC13A801dc570890819 (name: SP_001) with nonce: 1 +``` + +We denote this text message as `M2` + +and request user to sign and get the signature`S2`: + +![](../../../asset/015-Auth-Update-Key-Metamask.png) + +Finally, the app invokes [SP API "update\_key"](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/auth/update_key.html) by putting `S2` into http Authorization header. The following is an example: + +```plain +curl --location --request POST 'https://${SP_API_ADDRESS}/auth/update_key' \ +--header 'Origin: https://greenfield_app1.domain.com' \ +--header 'X-Gnfd-App-Domain: https://greenfield_app1.domain.com' \ +--header 'X-Gnfd-App-Reg-Nonce: 1' \ +--header 'X-Gnfd-App-Reg-Public-Key: 4db642fe6bc2ceda2e002feb8d78dfbcb2879d8fe28e84e02b7a940bc0440083' \ +--header 'X-Gnfd-App-Reg-Expiry-Date: 2023-04-28T16:25:24Z' \ +--header 'Authorization: PersonalSign ECDSA-secp256k1,SignedMsg=https://greenfield_app1.domain.com wants you to sign in with your BNB Greenfield account:\n0x3d0a49B091ABF8940AD742c0139416cEB30CdEe0\n\nRegister your identity public key 4db642fe6bc2ceda2e002feb8d78dfbcb2879d8fe28e84e02b7a940bc0440083\n\nURI: https://greenfield_app1.domain.com\nVersion: 1\nChain ID: 5600\nIssued At: 2023-04-24T16:25:24Z\nExpiration Time: 2023-04-28T16:25:24Z\nResources:\n- SP 0x70d1983A9A76C8d5d80c4cC13A801dc570890819 (name: SP_001) with nonce: 1,Signature=0x8663c48cfecb611d64540d3b653f51ef226f3f674e2c390ea9ca45746b22a4f839a15576b5b4cc1051183ae9b69ac54160dc3241bbe99c695a52fe25eaf2f8c01b' +``` + +Once the response code returns 200, you can check if the new account public key is saved into this SP by invoking [SP API "request\_nonce"](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/auth/get_nonce.html) +This API returns the latest key nonce for a given user address and app domain. + +If the API returns the new key nonce, the account public key has been successfully registered into the SP servers. The app can now use the EdDSA key pair generated in Step 1 to authenticate the user in future interactions with the SP. + +#### Step 3 - Use EdDSA seed to sign request and verification + +In Step1 & Step2, we generated EdDSA keys and registered them into SP. In Step3, we can use `EdDSA_private_K` to sign request when an app invokes a certain SP API. + +To sign a request, the app needs to define a customized text message with a recent expiry timestamp (denoted as `EdDSA_M`) and use `EdDSA_private_K` to sign this message to get the signature `EdDSA_S`. + +The text message format is `${actionContent}_${expiredTimestamp}`. + +For example, if a user clicks the "download" button in an app to download a private object they own, this will invoke the SP getObject API. 
+
+To combine `EdDSA_M` and `EdDSA_S`, the app includes them in the Authorization header when invoking the GetObject API:
+
+```plain
+curl --location 'https://${SP_API_ADDRESS}/${bucket_name}/${object_name}' \
+--header 'authorization: OffChainAuth EDDSA,SignedMsg=Invoke_GetObject_1682407345000,Signature=a48fff140b148369a108611502acff919720b5493aa36ba0886d8d73634ee20404963b847104d06aa822cf904741aff70ede4ba7d70fa8808c3206d4c93be623' \
+--header 'X-Gnfd-User-Address: 0x3d0a49B091ABF8940AD742c0139416cEB30CdEe0' \
+--header 'X-Gnfd-App-Domain: https://greenfield_app1.domain.com'
+```
+
+By including the signed message and signature in the Authorization header, the app can authenticate the request with the SP servers. The SP servers can then verify the signature using the EdDSA_public_K registered in Step 2.
+
+#### Step 4 - Manage EdDSA key pairs
+
+Although we defined an expiry date for each registered `EdDSA_public_K`, users might want to know how many EdDSA keys they are currently using and might want to delete them for security reasons.
+
+To list a user's registered EdDSA account public keys in an SP, apps can invoke [SP API "list\_key"](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/auth/list_key.html).
+To delete a user's registered EdDSA account public key in an SP, apps can invoke [SP API "delete\_key"](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/auth/delete_key.html).
+
+#### Auth API Specification
+
+See [SP Auth Rest API Doc](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/auth)
+
+### Rationale
+
+### Security Considerations
+
+#### Preventing replay attacks
+
+To prevent replay attacks, man-in-the-middle attacks in which an attacker captures the user's signature and resends it to establish a new session for themselves, the following measures should be taken:
+
+* A new `nonce` should be selected each time EdDSA keys are generated. This ensures that each generated key pair is unique and cannot be replayed.
+
+* When using EdDSA_private_K to sign a request, a recent timestamp must be included as the expiry date. This ensures that the signed message is only valid for a limited time and cannot be reused in a replay attack.
+
+By implementing these measures, the app can minimize the risk of replay attacks and ensure the security of the user's data and interactions with the SP servers.
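+
+On the SP side, these checks could amount to something like the following sketch (an assumption for illustration, not the actual authenticator code; it assumes the `${actionContent}_${expiredTimestamp}` format with a millisecond timestamp and `pub` being the registered EdDSA_public_K):
+
+```go
+package main
+
+import (
+	"crypto/ed25519"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// verifyRequest checks the expiry timestamp embedded in signedMsg and then
+// verifies the EdDSA signature against the registered public key.
+func verifyRequest(pub ed25519.PublicKey, signedMsg, sigHex string) error {
+	parts := strings.Split(signedMsg, "_")
+	ts, err := strconv.ParseInt(parts[len(parts)-1], 10, 64)
+	if err != nil {
+		return fmt.Errorf("malformed expiry timestamp: %w", err)
+	}
+	if time.Now().UnixMilli() > ts {
+		return errors.New("signed message expired")
+	}
+	sig, err := hex.DecodeString(sigHex)
+	if err != nil {
+		return err
+	}
+	if !ed25519.Verify(pub, []byte(signedMsg), sig) {
+		return errors.New("invalid off-chain-auth signature")
+	}
+	return nil
+}
+```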
+
+## GfSp Framework Authenticator Code
+
+Authenticator module code implementation: [Authenticator](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/modular/authenticator)
diff --git a/docs/modules/common/lifecycle_modular.md b/docs/modules/common/lifecycle_modular.md
new file mode 100644
index 000000000..ebd834d36
--- /dev/null
+++ b/docs/modules/common/lifecycle_modular.md
@@ -0,0 +1,92 @@
+# Common Abstract Interface
+
+Every service implements the Lifecycle and Modular interfaces, so the GfSp framework can perform unified lifecycle and resource management.
+
+## Lifecycle Interface
+
+The Lifecycle interface manages the lifecycle of a service and tracks its state changes. It also listens for signals from the process to ensure a graceful shutdown.
+
+Service is an interface for Lifecycle to manage. The component that plans to use Lifecycle needs to implement the following interface:
+
+```go
+// Service provides abstract methods to control the lifecycle of a service.
+// Every service must implement the Service interface.
+type Service interface {
+ // Name describes the service name.
+ Name() string
+ // Start starts a service; this method should be used in non-blocking form.
+ Start(ctx context.Context) error
+ // Stop stops a service; this method should be used in non-blocking form.
+ Stop(ctx context.Context) error
+}
+
+// Lifecycle is an interface to describe how services are managed.
+// The Lifecycle tracks the Service lifecycle and listens for signals from
+// the process to ensure a graceful shutdown.
+//
+// All managed services must first call RegisterServices to register with Lifecycle.
+type Lifecycle interface {
+ // RegisterServices registers services to ServiceLifecycle for managing.
+ RegisterServices(modular ...Service)
+ // StartServices starts all registered services by calling the Service.Start method.
+ StartServices(ctx context.Context) Lifecycle
+ // StopServices stops all registered services by calling the Service.Stop method.
+ StopServices(ctx context.Context)
+ // Signals listens for system signals to gracefully stop the registered services.
+ Signals(sigs ...os.Signal) Lifecycle
+ // Wait waits for the signal to stop the ServiceLifecycle; before stopping,
+ // the ServiceLifecycle calls StopServices to stop all registered services.
+ Wait(ctx context.Context)
+}
+```
+
+- [Lifecycle Code Snippet](https://github.com/bnb-chain/greenfield-storage-provider/blob/master/core/lifecycle/lifecycle.go)
+
+## Modular Interface
+
+```go
+// Modular is a common interface for submodules that are scheduled by the GfSp framework.
+// It inherits the lifecycle.Service interface, which is used to manage the lifecycle of services. Additionally, Modular is managed
+// by ResourceManager, which allows the GfSp framework to reserve and release resources from the Modular resource pool.
+type Modular interface {
+ lifecycle.Service
+ // ReserveResource reserves resources from the Modular resource pool.
+ ReserveResource(ctx context.Context, state *rcmgr.ScopeStat) (rcmgr.ResourceScopeSpan, error)
+ // ReleaseResource releases resources to the Modular resource pool.
+ ReleaseResource(ctx context.Context, scope rcmgr.ResourceScopeSpan)
+}
+```
+
+- [Modular Code Snippet](https://github.com/bnb-chain/greenfield-storage-provider/blob/master/core/module/modular.go)
+
+## Limit
+
+Limit is an interface that specifies basic resource limits.
+
+```go
+type Limit interface {
+ // GetMemoryLimit returns the (current) memory limit.
+ GetMemoryLimit() int64
+ // GetFDLimit returns the file descriptor limit.
+ GetFDLimit() int
+ // GetConnLimit returns the connection limit, for inbound or outbound connections.
+ GetConnLimit(Direction) int
+ // GetConnTotalLimit returns the total connection limit.
+ GetConnTotalLimit() int
+ // GetTaskLimit returns the task limit, for high, medium and low priority tasks.
+ GetTaskLimit(ReserveTaskPriority) int
+ // GetTaskTotalLimit returns the total task limit.
+ GetTaskTotalLimit() int
+ // ScopeStat returns the resource usage stat of the scope.
+ ScopeStat() *ScopeStat
+ // NotLess returns an indicator of whether the limit covers the param limit's fields.
+ NotLess(Limit) bool
+ // Add adds the param limit's field values to self.
+ Add(Limit)
+ // Sub subtracts the param limit's field values from self.
+ Sub(Limit) bool
+ // Equal returns an indicator of whether the limit equals the param limit.
+ Equal(Limit) bool
+ // String returns the Limit state string.
+ String() string
+}
+```
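+
+For illustration, a minimal sketch of a component implementing the Service interface above (the names are hypothetical; a real submodule would also implement Modular):
+
+```go
+// gcWorker is a hypothetical background service managed by Lifecycle.
+type gcWorker struct {
+	quit chan struct{}
+}
+
+func (w *gcWorker) Name() string { return "gc-worker" }
+
+// Start launches the work loop in a goroutine so the call does not block.
+func (w *gcWorker) Start(ctx context.Context) error {
+	go func() {
+		for {
+			select {
+			case <-w.quit:
+				return
+			case <-time.After(time.Minute):
+				// run one round of garbage collection here
+			}
+		}
+	}()
+	return nil
+}
+
+func (w *gcWorker) Stop(ctx context.Context) error {
+	close(w.quit)
+	return nil
+}
+```
+
+Such a service would then be registered and driven by a Lifecycle implementation via `RegisterServices`, `StartServices`, `Signals` and `Wait`.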
diff --git a/docs/modules/common/proto.md b/docs/modules/common/proto.md
new file mode 100644
index 000000000..2c5f5d7c1
--- /dev/null
+++ b/docs/modules/common/proto.md
@@ -0,0 +1,446 @@
+# Proto Definition
+
+The GfSp framework uses protobuf to define structured data; protobuf is a language-neutral, platform-neutral and extensible mechanism for serializing data. This section lists the protobuf definitions used in GfSp code.
+
+## GfSpTask Proto
+
+Tasks in GfSp use proto to describe themselves.
+
+```proto
+message GfSpTask {
+ string address = 1;
+ int64 create_time = 2;
+ int64 update_time = 3;
+ int64 timeout = 4;
+ int32 task_priority = 5;
+ int64 retry = 6;
+ int64 max_retry = 7;
+ base.types.gfsperrors.GfSpError err = 8;
+}
+```
+
+### GfSpCreateBucketApprovalTask Proto
+
+```proto
+message GfSpCreateBucketApprovalTask {
+ GfSpTask task = 1;
+ greenfield.storage.MsgCreateBucket create_bucket_info = 2;
+}
+```
+
+### GfSpCreateObjectApprovalTask Proto
+
+```proto
+message GfSpCreateObjectApprovalTask {
+ GfSpTask task = 1;
+ greenfield.storage.MsgCreateObject create_object_info = 2;
+}
+```
+
+### GfSpReplicatePieceApprovalTask Proto
+
+```proto
+message GfSpReplicatePieceApprovalTask {
+ GfSpTask task = 1;
+ greenfield.storage.ObjectInfo object_info = 2;
+ greenfield.storage.Params storage_params = 3;
+ string ask_sp_operator_address = 4;
+ bytes ask_signature = 5;
+ string approved_sp_endpoint = 6;
+ string approved_sp_operator_address = 7;
+ bytes approved_signature = 8;
+ string approved_sp_approval_address = 9;
+ uint64 expired_height = 10;
+}
+```
+
+### GfSpUploadObjectTask Proto
+
+```proto
+message GfSpUploadObjectTask {
+ GfSpTask task = 1;
+ greenfield.storage.ObjectInfo object_info = 2;
+ greenfield.storage.Params storage_params = 3;
+}
+```
+
+### GfSpReplicatePieceTask Proto
+
+```proto
+message GfSpReplicatePieceTask {
+ GfSpTask task = 1;
+ greenfield.storage.ObjectInfo object_info = 2;
+ greenfield.storage.Params storage_params = 3;
+ repeated bytes secondary_signature = 4;
+ bool sealed = 5;
+}
+```
+
+### GfSpReceivePieceTask Proto
+
+```proto
+message GfSpReceivePieceTask {
+ GfSpTask task = 1;
+ greenfield.storage.ObjectInfo object_info = 2;
+ greenfield.storage.Params storage_params = 3;
+ uint32 replicate_idx = 4;
+ int32 piece_idx = 5;
+ int64 piece_size = 6;
+ bytes piece_checksum = 7;
+ bytes signature = 8;
+ bool sealed = 9;
+}
+```
+
+### GfSpSealObjectTask Proto
+
+```proto
+message GfSpSealObjectTask {
+ GfSpTask task = 1;
+ greenfield.storage.ObjectInfo object_info = 2;
+ greenfield.storage.Params storage_params = 3;
+ repeated bytes secondary_signature = 4;
+}
+```
+
+### GfSpDownloadObjectTask Proto
+
+```proto
+message GfSpDownloadObjectTask {
+ GfSpTask task = 1;
+ greenfield.storage.ObjectInfo object_info = 2;
+ greenfield.storage.BucketInfo bucket_info = 3;
+ greenfield.storage.Params storage_params = 4;
+ string user_address = 5;
+ int64 low = 6;
+ int64 high = 7;
+}
+```
+
+### GfSpDownloadPieceTask Proto
+
+```proto
+message GfSpDownloadPieceTask {
+ GfSpTask task = 1;
+ greenfield.storage.ObjectInfo object_info = 2;
+ greenfield.storage.BucketInfo bucket_info = 3;
+ greenfield.storage.Params storage_params = 4;
+ bool enable_check = 5; // check read quota, only in first piece
+ string user_address = 6;
+ uint64 total_size = 7;
+ string piece_key = 8;
+ uint64 piece_offset = 9;
+ uint64 piece_length = 10;
+}
+```
+
+### GfSpChallengePieceTask Proto
+
+```proto
+message GfSpChallengePieceTask {
+ GfSpTask task = 1;
+ greenfield.storage.ObjectInfo object_info = 2;
+ greenfield.storage.BucketInfo bucket_info = 3;
+ greenfield.storage.Params storage_params = 4;
+ string user_address = 5;
+ uint32 segment_idx = 6;
+ int32 redundancy_idx = 7;
+ bytes integrity_hash = 8;
+ repeated bytes piece_hash = 9;
+ int64 piece_data_size = 10;
+}
+```
+
+### GfSpGCObjectTask Proto
+
+```proto
+message GfSpGCObjectTask {
+ GfSpTask task = 1;
+ uint64 start_block_number = 2;
+ uint64 end_block_number = 3;
+ uint64 current_block_number = 4;
+ uint64 last_deleted_object_id = 5;
+ bool running = 6;
+}
+```
+
+### GfSpGCZombiePieceTask Proto
+
+```proto
+message GfSpGCZombiePieceTask {
+ GfSpTask task = 1;
+ uint64 object_id = 2;
+ uint64 delete_count = 3;
+ bool running = 4;
+}
+```
+
+### GfSpGCMetaTask Proto
+
+```proto
+message GfSpGCMetaTask {
+ GfSpTask task = 1;
+ uint64 current_idx = 2;
+ uint64 delete_count = 3;
+ bool running = 4;
+}
+```
+
+## Greenfield Proto
+
+Some structured data used in GfSp is defined in the Greenfield chain repo; we display it as follows.
+
+### MsgCreateBucket Proto
+
+```proto
+message MsgCreateBucket {
+ option (cosmos.msg.v1.signer) = "creator";
+
+ // creator defines the account address of bucket creator, it is also the bucket owner.
+ string creator = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // bucket_name defines a globally unique name of bucket
+ string bucket_name = 2;
+ // visibility means the bucket is private or public. if private, only bucket owner or grantee can read it,
+ // otherwise every greenfield user can read it.
+ VisibilityType visibility = 3;
+ // payment_address defines an account address specified by bucket owner to pay the read fee. Default: creator
+ string payment_address = 4 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // primary_sp_address defines the address of primary sp.
+ string primary_sp_address = 6 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // primary_sp_approval defines the approval info of the primary SP which indicates that primary sp confirm the user's request.
+ Approval primary_sp_approval = 7;
+ // charged_read_quota defines the read data that users are charged for, measured in bytes.
+ // The available read data for each user is the sum of the free read data provided by SP and
+ // the ChargeReadQuota specified here.
+ uint64 charged_read_quota = 8;
+}
+```
+
+### MsgCreateObject Proto
+
+```proto
+message MsgCreateObject {
+ option (cosmos.msg.v1.signer) = "creator";
+
+ // creator defines the account address of object uploader
+ string creator = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // bucket_name defines the name of the bucket where the object is stored.
+ string bucket_name = 2;
+ // object_name defines the name of object
+ string object_name = 3;
+ // payload_size defines size of the object's payload
+ uint64 payload_size = 4;
+ // visibility means the object is private or public. if private, only object owner or grantee can access it,
+ // otherwise every greenfield user can access it.
+ VisibilityType visibility = 5;
+ // content_type defines a standard MIME type describing the format of the object.
+ string content_type = 6;
+ // primary_sp_approval defines the approval info of the primary SP which indicates that primary sp confirm the user's request.
+ Approval primary_sp_approval = 7;
+ // expect_checksums defines a list of hashes which were generated by the redundancy algorithm.
+ repeated bytes expect_checksums = 8; + // redundancy_type can be ec or replica + RedundancyType redundancy_type = 9; + // expect_secondarySPs defines a list of StorageProvider address, which is optional + repeated string expect_secondary_sp_addresses = 10 [(cosmos_proto.scalar) = "cosmos.AddressString"]; +} +``` + +### BucketInfo Proto + +```proto +message BucketInfo { + // owner is the account address of bucket creator, it is also the bucket owner. + string owner = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // bucket_name is a globally unique name of bucket + string bucket_name = 2; + // visibility defines the highest permissions for bucket. When a bucket is public, everyone can get storage objects in it. + VisibilityType visibility = 3; + // id is the unique identification for bucket. + string id = 4 [ + (cosmos_proto.scalar) = "cosmos.Uint", + (gogoproto.customtype) = "Uint", + (gogoproto.nullable) = false + ]; + // source_type defines which chain the user should send the bucket management transactions to + SourceType source_type = 5; + // create_at define the block timestamp when the bucket created. + int64 create_at = 6; + // payment_address is the address of the payment account + string payment_address = 7 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // primary_sp_address is the address of the primary sp. Objects belongs to this bucket will never + // leave this SP, unless you explicitly shift them to another SP. + string primary_sp_address = 8 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // charged_read_quota defines the traffic quota for read in bytes per month. + // The available read data for each user is the sum of the free read data provided by SP and + // the ChargeReadQuota specified here. + uint64 charged_read_quota = 9; + // billing info of the bucket + BillingInfo billing_info = 10 [(gogoproto.nullable) = false]; + // bucket_status define the status of the bucket. + BucketStatus bucket_status = 11; +} +``` + +### ObjectInfo Proto + +```proto +message ObjectInfo { + string owner = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // bucket_name is the name of the bucket + string bucket_name = 2; + // object_name is the name of object + string object_name = 3; + // id is the unique identifier of object + string id = 4 [ + (cosmos_proto.scalar) = "cosmos.Uint", + (gogoproto.customtype) = "Uint", + (gogoproto.nullable) = false + ]; + // payloadSize is the total size of the object payload + uint64 payload_size = 5; + // visibility defines the highest permissions for object. When an object is public, everyone can access it. + VisibilityType visibility = 6; + // content_type define the format of the object which should be a standard MIME type. + string content_type = 7; + // create_at define the block timestamp when the object is created + int64 create_at = 8; + // object_status define the upload status of the object. + ObjectStatus object_status = 9; + // redundancy_type define the type of the redundancy which can be multi-replication or EC. + RedundancyType redundancy_type = 10; + // source_type define the source of the object. + SourceType source_type = 11; + // checksums define the root hash of the pieces which stored in a SP. 
+ // add omit tag to omit the field when converting to NFT metadata
+ repeated bytes checksums = 12 [(gogoproto.moretags) = "traits:\"omit\""];
+ // secondary_sp_addresses define the addresses of secondary_sps
+ repeated string secondary_sp_addresses = 13 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+}
+```
+
+### Params Proto
+
+Params defines the parameters for the module.
+
+```proto
+message Params {
+ option (gogoproto.goproto_stringer) = false;
+ VersionedParams versioned_params = 1 [(gogoproto.nullable) = false];
+
+ // max_payload_size is the maximum size of the payload, default: 2G
+ uint64 max_payload_size = 2;
+ // relayer fee for the mirror bucket tx
+ string mirror_bucket_relayer_fee = 3;
+ // relayer fee for the ACK or FAIL_ACK package of the mirror bucket tx
+ string mirror_bucket_ack_relayer_fee = 4;
+ // relayer fee for the mirror object tx
+ string mirror_object_relayer_fee = 5;
+ // relayer fee for the ACK or FAIL_ACK package of the mirror object tx
+ string mirror_object_ack_relayer_fee = 6;
+ // relayer fee for the mirror group tx
+ string mirror_group_relayer_fee = 7;
+ // relayer fee for the ACK or FAIL_ACK package of the mirror group tx
+ string mirror_group_ack_relayer_fee = 8;
+ // The maximum number of buckets that can be created per account
+ uint32 max_buckets_per_account = 9;
+ // The window to count the discontinued objects or buckets
+ uint64 discontinue_counting_window = 10;
+ // The max objects can be requested in a window
+ uint64 discontinue_object_max = 11;
+ // The max buckets can be requested in a window
+ uint64 discontinue_bucket_max = 12;
+ // The object will be deleted after the confirm period in seconds
+ int64 discontinue_confirm_period = 13;
+ // The max delete objects in each end block
+ uint64 discontinue_deletion_max = 14;
+ // The max number for deleting policy in each end block
+ uint64 stale_policy_cleanup_max = 15;
+}
+```
+
+### GfSpPing Proto
+
+Ping defines the heartbeat request between p2p nodes.
+
+```proto
+message GfSpPing {
+ // sp_operator_address define sp operator public key
+ string sp_operator_address = 1;
+ // signature define the signature of sp sign the msg
+ bytes signature = 2;
+}
+```
+
+### GfSpPong Proto
+
+Pong defines the heartbeat response between p2p nodes.
+
+```proto
+message GfSpPong {
+ // nodes define the p2p node info list
+ repeated GfSpNode nodes = 1;
+ // sp_operator_address define sp operator public key
+ string sp_operator_address = 2;
+ // signature define the signature of sp sign the msg
+ bytes signature = 3;
+}
+
+// Node defines the p2p node info
+message GfSpNode {
+ // node_id defines the node id
+ string node_id = 1;
+ // multi_addr define the node multi addr
+ repeated string multi_addr = 2;
+}
+```
+
+### MsgSealObject
+
+```proto
+message MsgSealObject {
+ option (cosmos.msg.v1.signer) = "operator";
+
+ // operator defines the account address of primary SP
+ string operator = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // bucket_name defines the name of the bucket where the object is stored.
+ string bucket_name = 2;
+ // object_name defines the name of object to be sealed.
+ string object_name = 3;
+ // secondary_sp_addresses defines a list of storage provider which store the redundant data.
+ repeated string secondary_sp_addresses = 4 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // secondary_sp_signatures defines the signature of the secondary sp that can
+ // acknowledge that the payload data has been received and stored.
+ repeated bytes secondary_sp_signatures = 5;
+}
+```
+
+### MsgRejectSealObject Proto
+
+```proto
+message MsgRejectSealObject {
+ option (cosmos.msg.v1.signer) = "operator";
+
+ // operator defines the account address of the object owner
+ string operator = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // bucket_name defines the name of the bucket where the object is stored.
+ string bucket_name = 2;
+ // object_name defines the name of the unsealed object to be rejected.
+ string object_name = 3;
+}
+```
+
+### MsgDiscontinueBucket
+
+```proto
+message MsgDiscontinueBucket {
+ option (cosmos.msg.v1.signer) = "operator";
+
+ // operator is the sp who wants to stop serving the bucket.
+ string operator = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // bucket_name defines the name of the bucket where the object which to be discontinued is stored.
+ string bucket_name = 2;
+ // the reason for the request.
+ string reason = 3;
+}
+```
diff --git a/docs/modules/common/task.md b/docs/modules/common/task.md
new file mode 100644
index 000000000..287011ffe
--- /dev/null
+++ b/docs/modules/common/task.md
@@ -0,0 +1,638 @@
+# Task
+
+Task is an abstract interface that describes the smallest unit of interaction for SP background services.
+
+```go
+type Task interface {
+ // Key returns the unique identifier of the task. It is recommended that each task
+ // has its own prefix. In addition, it should also include the information of the
+ // task's own identity.
+ // For example:
+ // 1. ApprovalTask may include the bucket name and object name,
+ // 2. ObjectTask may include the object ID,
+ // 3. GCTask may include the timestamp.
+ Key() TKey
+ // Type returns the type of the task. Each task has a unique type, such as
+ // TypeTaskCreateBucketApproval, TypeTaskUpload etc., with only one TType
+ // definition.
+ Type() TType
+ // GetAddress returns the task runner address. There is only one runner at the
+ // same time, which will assist in quickly locating the running node of the task.
+ GetAddress() string
+ // SetAddress sets the runner address to the task.
+ SetAddress(string)
+ // GetCreateTime returns the creation time of the task. The creation time is used
+ // to judge the task execution time.
+ GetCreateTime() int64
+ // SetCreateTime sets the creation time of the task.
+ SetCreateTime(int64)
+ // GetUpdateTime returns the last updated time of the task. The updated time is
+ // used to determine whether the task has expired with the timeout.
+ GetUpdateTime() int64
+ // SetUpdateTime sets the last updated time of the task. Any change in task information
+ // requires setting the update time.
+ SetUpdateTime(int64)
+ // GetTimeout returns the timeout of the task. The timeout is a duration; if update
+ // time plus timeout is less than now, the task is expired.
+ GetTimeout() int64
+ // SetTimeout sets the timeout duration of the task.
+ SetTimeout(int64)
+ // ExceedTimeout returns an indicator of whether the task has timed out: if update
+ // time plus timeout is less than now, it returns true, otherwise false.
+ ExceedTimeout() bool
+ // GetMaxRetry returns the max retry times of the task. Each type of task has a
+ // fixed max retry count.
+ GetMaxRetry() int64
+ // SetMaxRetry sets the max retry times of the task.
+ SetMaxRetry(int64)
+ // GetRetry returns the retry counter of the task.
+ GetRetry() int64
+ // SetRetry sets the retry counter of the task.
+ SetRetry(int)
+ // IncRetry increases the retry counter of the task. Each task has a max retry
+ // count; if the retry counter exceeds it, the task should be canceled.
+ IncRetry()
+ // ExceedRetry returns an indicator of whether the retry counter is greater than the max retry count.
+ ExceedRetry() bool
+ // Expired returns an indicator of whether both ExceedTimeout and ExceedRetry hold.
+ Expired() bool
+ // GetPriority returns the priority of the task. Each type of task has a fixed
+ // priority. The higher the priority, the higher the urgency of the task, and
+ // it will be executed first.
+ GetPriority() TPriority
+ // SetPriority sets the priority of the task. In most cases, the priority of the
+ // task does not need to be set, because the priority of the task corresponds to
+ // the task type one to one. Once the task type is determined, the priority is
+ // determined. But some scenarios need to dynamically adjust the priority of the
+ // task type, so this interface is needed.
+ SetPriority(TPriority)
+ // EstimateLimit returns the estimated resources to be consumed. It is used to
+ // apply for resources from the rcmgr and to decide whether the task can be executed
+ // immediately.
+ EstimateLimit() rcmgr.Limit
+ // Info returns the task detail info for log and debug.
+ Info() string
+ // Error returns the task error. If the task is normal, it returns nil.
+ Error() error
+ // SetError sets the error to the task. Any error that occurs during task execution
+ // will be logged through the SetError method.
+ SetError(error)
+}
+```
+
+## Task Type
+
+There are three main types of task: `ApprovalTask`, `ObjectTask` and `GCTask`.
+
+`ApprovalTask` is used to record approval information for users creating buckets and objects. Primary SP approval is required
+before serving the bucket and object. If the SP approves the message, it will sign the approval message. Greenfield will
+verify the signature of the approval message to determine whether the SP accepts the bucket and object. When the primary SP replicates
+pieces to secondary SPs, the approval message is broadcast to other SPs. If they approve the message, the primary SP will
+select some of them to replicate the pieces to. Before receiving the pieces, the selected SPs will verify the signature
+of the approval message. `ApprovalTask` includes `ApprovalCreateBucketTask`, `ApprovalCreateObjectTask` and `ApprovalReplicatePieceTask`.
+
+`ObjectTask` is associated with an object and records information about its different stages. This includes `UploadObjectTask`,
+which uploads the object payload data to the primary SP, `ReplicatePieceTask`, which replicates the object pieces to the
+secondary SPs, and `ReceivePieceTask`, which is exclusive to the secondary SP and records information about receiving
+the piece. The secondary SP uses this information to confirm whether the object was successfully sealed on Greenfield,
+ensuring a return for the secondary SP. SealObjectTask seals the object on Greenfield, while the DownloadObjectTask allows
+the user to download part or all of the object payload data. ChallengePieceTask provides the validator with challenge piece
+information, which they can use to challenge the SP if they suspect that the user's payload data was not stored correctly.
+
+`GCTask` is an abstract interface that records information about garbage collection.
+This includes `GCObjectTask`, which collects
+piece store space by deleting payload data of objects that have been deleted on Greenfield, `GCZombiePieceTask`, which collects piece
+store space by deleting zombie piece data resulting from any exception where the piece data meta is not on the Greenfield
+chain, and `GCMetaTask`, which collects SP meta store space by deleting expired data.
+
+### ApprovalTask
+
+ApprovalTask is an abstract interface to record the ask approval information. The approval task's timeliness is based on block height;
+once the expired height is reached, the approval becomes invalid.
+
+```go
+type ApprovalTask interface {
+ Task
+ // GetExpiredHeight returns the expired height of the approval.
+ GetExpiredHeight() uint64
+ // SetExpiredHeight sets the expired height of the approval. When the SP
+ // approves the approval, it should set the expired height to bound
+ // the approval's timeliness. This is one of the ways the SP prevents being
+ // attacked.
+ SetExpiredHeight(uint64)
+}
+```
+
+#### ApprovalCreateBucketTask
+
+ApprovalCreateBucketTask is an abstract interface to record the ask create bucket approval information. The user account
+creates MsgCreateBucket, and the SP decides whether to approve the request based on the MsgCreateBucket. If approved, the SP will
+SetExpiredHeight and sign MsgCreateBucket.
+
+```go
+type ApprovalCreateBucketTask interface {
+ ApprovalTask
+ // InitApprovalCreateBucketTask inits the ApprovalCreateBucketTask by
+ // MsgCreateBucket and task priority. The SP only fills the MsgCreateBucket's
+ // PrimarySpApproval field and cannot change other fields.
+ InitApprovalCreateBucketTask(*storagetypes.MsgCreateBucket, TPriority)
+ // GetCreateBucketInfo returns the user's MsgCreateBucket.
+ GetCreateBucketInfo() *storagetypes.MsgCreateBucket
+ // SetCreateBucketInfo sets the MsgCreateBucket. Should try to avoid calling
+ // this method; it will change the approval information.
+ SetCreateBucketInfo(*storagetypes.MsgCreateBucket)
+}
+```
+
+#### ApprovalCreateObjectTask
+
+ApprovalCreateObjectTask is an abstract interface to record the ask create object approval information. The user account
+creates MsgCreateObject, and the SP decides whether to approve the request based on the MsgCreateObject. If approved, the SP will
+SetExpiredHeight and sign MsgCreateObject.
+
+```go
+type ApprovalCreateObjectTask interface {
+ ApprovalTask
+ // InitApprovalCreateObjectTask inits the ApprovalCreateObjectTask by
+ // MsgCreateObject and task priority. The SP only fills the MsgCreateObject's
+ // PrimarySpApproval field and cannot change other fields.
+ InitApprovalCreateObjectTask(*storagetypes.MsgCreateObject, TPriority)
+ // GetCreateObjectInfo returns the user's MsgCreateObject.
+ GetCreateObjectInfo() *storagetypes.MsgCreateObject
+ // SetCreateObjectInfo sets the MsgCreateObject. Should try to avoid calling
+ // this method; it will change the approval information.
+ SetCreateObjectInfo(*storagetypes.MsgCreateObject)
+}
+```
+
+#### ApprovalReplicatePieceTask
+
+ApprovalReplicatePieceTask is an abstract interface to record the ask for replicating pieces to other SPs (as secondary SPs for the object).
+It is initiated by the primary SP in the replicate pieces phase. Before the primary SP sends it to other SPs, the primary
+SP signs the task, and the other SPs verify that it was sent by a legitimate SP. If the other SPs approve the request, they will
+SetExpiredHeight and sign the ApprovalReplicatePieceTask.
+
+```go
+type ApprovalReplicatePieceTask interface {
+ ObjectTask
+ ApprovalTask
+ // InitApprovalReplicatePieceTask inits the ApprovalReplicatePieceTask by ObjectInfo,
+ // storage params, task priority and primary operator address. The storage params
+ // can affect the size of the data accepted by the secondary SP, so this is a necessary
+ // parameter that cannot be changed.
+ InitApprovalReplicatePieceTask(object *storagetypes.ObjectInfo, params *storagetypes.Params, priority TPriority, askOpAddress string)
+ // GetAskSpOperatorAddress returns the operator address of the SP that initiated the ask
+ // replicate piece approval request.
+ GetAskSpOperatorAddress() string
+ // SetAskSpOperatorAddress sets the operator address of the SP that initiated the ask
+ // replicate piece approval request. Should try to avoid calling this method;
+ // it will change the approval information.
+ SetAskSpOperatorAddress(string)
+ // GetAskSignature returns the initiating SP's signature, signed by its operator private key.
+ GetAskSignature() []byte
+ // SetAskSignature sets the initiating SP's signature, signed by its operator private key.
+ SetAskSignature([]byte)
+ // GetApprovedSpOperatorAddress returns the approved operator address of the SP.
+ GetApprovedSpOperatorAddress() string
+ // SetApprovedSpOperatorAddress sets the approved operator address of the SP.
+ SetApprovedSpOperatorAddress(string)
+ // GetApprovedSignature returns the approved signature of the SP.
+ GetApprovedSignature() []byte
+ // SetApprovedSignature sets the approved signature of the SP.
+ SetApprovedSignature([]byte)
+ // GetApprovedSpEndpoint returns the approved endpoint of the SP. It is used to replicate
+ // pieces to the secondary SP.
+ GetApprovedSpEndpoint() string
+ // SetApprovedSpEndpoint sets the approved endpoint of the SP.
+ SetApprovedSpEndpoint(string)
+ // GetApprovedSpApprovalAddress returns the approved approval address of the SP. It is
+ // used to seal the object on Greenfield.
+ GetApprovedSpApprovalAddress() string
+ // SetApprovedSpApprovalAddress sets the approved approval address of the SP.
+ SetApprovedSpApprovalAddress(string)
+ // GetSignBytes returns the bytes from the task for initiated and approved SPs
+ // to sign.
+ GetSignBytes() []byte
+}
+```
+
+### ObjectTask
+
+The ObjectTask is associated with an object and storage params and records information about the object's different stages.
+Considering that storage params can change on Greenfield, the storage params of each object should be determined when
+it is created, and they should not be queried during the task flow, which would be inefficient and error-prone.
+
+```go
+type ObjectTask interface {
+ Task
+ // GetObjectInfo returns the associated object.
+ GetObjectInfo() *storagetypes.ObjectInfo
+ // SetObjectInfo sets the associated object.
+ SetObjectInfo(*storagetypes.ObjectInfo)
+ // GetStorageParams returns the storage params.
+ GetStorageParams() *storagetypes.Params
+ // SetStorageParams sets the storage params. Should try to avoid calling this
+ // method; it will change the task base information.
+ // For example: it will change the resource estimate for UploadObjectTask and so on.
+ SetStorageParams(*storagetypes.Params)
+}
+```
+
+#### UploadObjectTask
+
+The UploadObjectTask is an abstract interface to record the information for uploading object payload data to the primary SP.
+
+```go
+type UploadObjectTask interface {
+ ObjectTask
+ // InitUploadObjectTask inits the UploadObjectTask by ObjectInfo and Params.
+ InitUploadObjectTask(object *storagetypes.ObjectInfo, params *storagetypes.Params, timeout int64)
+}
+```
+
+#### ReplicatePieceTask
+
+The ReplicatePieceTask is an abstract interface to record the information for replicating pieces of object payload data to secondary SPs.
+
+```go
+type ReplicatePieceTask interface {
+ ObjectTask
+ // InitReplicatePieceTask inits the ReplicatePieceTask by ObjectInfo, params,
+ // task priority, timeout and max retry.
+ InitReplicatePieceTask(object *storagetypes.ObjectInfo, params *storagetypes.Params, priority TPriority, timeout int64, retry int64)
+ // GetSealed returns an indicator of whether the object was successfully sealed
+ // on Greenfield after replicating pieces; it is an optimization. ReplicatePieceTask and
+ // SealObjectTask are combined; otherwise, the two tasks would be completed in
+ // two stages. If the combination succeeds and the object is sealed,
+ // the number of SealObjectTasks can be reduced, saving resource overhead.
+ GetSealed() bool
+ // SetSealed sets whether the object was successfully sealed after replicating pieces.
+ SetSealed(bool)
+ // GetSecondaryAddresses returns the secondary SPs' addresses. It is used to
+ // generate MsgSealObject.
+ GetSecondaryAddresses() []string
+ // SetSecondaryAddresses sets the secondary SPs' addresses.
+ SetSecondaryAddresses([]string)
+ // GetSecondarySignatures returns the secondary SPs' signatures. It is used to
+ // generate MsgSealObject.
+ GetSecondarySignatures() [][]byte
+ // SetSecondarySignatures sets the secondary SPs' signatures.
+ SetSecondarySignatures([][]byte)
+}
+```
+
+#### ReceivePieceTask
+
+The ReceivePieceTask is an abstract interface to record the information for receiving pieces of object payload data from the primary
+SP; it exists only in the secondary SP.
+
+```go
+type ReceivePieceTask interface {
+ ObjectTask
+ // InitReceivePieceTask inits the ReceivePieceTask.
+ InitReceivePieceTask(object *storagetypes.ObjectInfo, params *storagetypes.Params, priority TPriority,
+ replicateIdx uint32, pieceIdx int32, pieceSize int64)
+ // GetReplicateIdx returns the replicate index. The replicate index identifies the
+ // serial number of the secondary SP for the object piece copy.
+ GetReplicateIdx() uint32
+ // SetReplicateIdx sets the replicate index.
+ SetReplicateIdx(uint32)
+ // GetPieceIdx returns the piece index. The piece index identifies the serial number
+ // of the segment of object payload data for the object piece copy.
+ GetPieceIdx() int32
+ // SetPieceIdx sets the piece index.
+ SetPieceIdx(int32)
+ // GetPieceSize returns the received piece data size; it is used for resource estimation.
+ GetPieceSize() int64
+ // SetPieceSize sets the received piece data size.
+ SetPieceSize(int64)
+ // GetPieceChecksum returns the checksum of the received piece data; it is used to check
+ // that the piece data is correct.
+ GetPieceChecksum() []byte
+ // SetPieceChecksum sets the checksum of the received piece data.
+ SetPieceChecksum([]byte)
+ // GetSignature returns the primary SP's signature. Because the ReceivePieceTask
+ // is transferred to the secondary SP, it is necessary to prove that the task was
+ // sent by a legitimate SP.
+ GetSignature() []byte
+ // SetSignature sets the primary SP's signature.
+ SetSignature([]byte)
+ // GetSignBytes returns the bytes from the task for the primary SP to sign.
+ GetSignBytes() []byte
+ // GetSealed returns an indicator of whether the object of the received piece data is
+ // sealed on Greenfield. The secondary SP has an incentive to confirm this; otherwise
+ // it wastes its storage resources.
+ GetSealed() bool
+ // SetSealed sets whether the object of the received piece data was successfully sealed.
+ SetSealed(bool)
+}
+```
+
+#### SealObjectTask
+
+The SealObjectTask is an abstract interface to record the information for sealing the object on the Greenfield chain.
+
+```go
+// SealObjectTask is an abstract interface to record the information for sealing object on Greenfield chain.
+type SealObjectTask interface {
+ ObjectTask
+ // InitSealObjectTask inits the SealObjectTask.
+ InitSealObjectTask(object *storagetypes.ObjectInfo, params *storagetypes.Params, priority TPriority, addresses []string,
+ signatures [][]byte, timeout int64, retry int64)
+ // GetSecondaryAddresses returns the secondary SPs' addresses.
+ GetSecondaryAddresses() []string
+ // GetSecondarySignatures returns the secondary SPs' signatures; they are used to generate
+ // MsgSealObject.
+ GetSecondarySignatures() [][]byte
+}
+```
+
+#### DownloadObjectTask
+
+The DownloadObjectTask is an abstract interface to record the information for downloading pieces of object payload data.
+
+```go
+type DownloadObjectTask interface {
+ ObjectTask
+ // InitDownloadObjectTask inits the DownloadObjectTask.
+ InitDownloadObjectTask(object *storagetypes.ObjectInfo, bucket *storagetypes.BucketInfo, params *storagetypes.Params,
+ priority TPriority, userAddress string, low int64, high int64, timeout int64, retry int64)
+ // GetBucketInfo returns the BucketInfo of the download object.
+ // It is used to query and calculate the bucket read quota.
+ GetBucketInfo() *storagetypes.BucketInfo
+ // SetBucketInfo sets the BucketInfo of the download object.
+ SetBucketInfo(*storagetypes.BucketInfo)
+ // GetUserAddress returns the user account downloading the object.
+ // It is used to record the read bucket information.
+ GetUserAddress() string
+ // SetUserAddress sets the user account downloading the object.
+ SetUserAddress(string)
+ // GetSize returns the download payload data size, high - low + 1.
+ GetSize() int64
+ // GetLow returns the start offset of the download payload data.
+ GetLow() int64
+ // GetHigh returns the end offset of the download payload data.
+ GetHigh() int64
+}
+```
+
+#### DownloadPieceTask
+
+DownloadPieceTask is an abstract interface to record the information for downloading piece data.
+
+```go
+type DownloadPieceTask interface {
+ ObjectTask
+ // InitDownloadPieceTask inits the DownloadPieceTask.
+ InitDownloadPieceTask(object *storagetypes.ObjectInfo, bucket *storagetypes.BucketInfo, params *storagetypes.Params,
+ priority TPriority, enableCheck bool, userAddress string, totalSize uint64, pieceKey string, pieceOffset uint64,
+ pieceLength uint64, timeout int64, maxRetry int64)
+ // GetBucketInfo returns the BucketInfo of the download object.
+ // It is used to query and calculate the bucket read quota.
+ GetBucketInfo() *storagetypes.BucketInfo
+ // SetBucketInfo sets the BucketInfo of the download object.
+ SetBucketInfo(*storagetypes.BucketInfo)
+ // GetUserAddress returns the user account downloading the object.
+ // It is used to record the read bucket information.
+ GetUserAddress() string
+ // SetUserAddress sets the user account downloading the object.
+ SetUserAddress(string)
+ // GetSize returns the download payload data size.
+ GetSize() int64
+ // GetEnableCheck returns the enable_check flag.
+ GetEnableCheck() bool
+ // GetTotalSize returns the total size.
+ GetTotalSize() uint64
+ // GetPieceKey returns the piece key.
+ GetPieceKey() string
+ // GetPieceOffset returns the piece offset.
+ GetPieceOffset() uint64
+ // GetPieceLength returns the piece length.
+ GetPieceLength() uint64
+}
+```
+
+#### ChallengePieceTask
+
+ChallengePieceTask is an abstract interface to record the information for getting challenge piece info; the validator gets challenge
+info to confirm whether the SP stores the user's data correctly.
+
+```go
+type ChallengePieceTask interface {
+ ObjectTask
+ // InitChallengePieceTask inits the ChallengePieceTask.
+ InitChallengePieceTask(object *storagetypes.ObjectInfo, bucket *storagetypes.BucketInfo, params *storagetypes.Params,
+ priority TPriority, userAddress string, replicateIdx int32, segmentIdx uint32, timeout int64, retry int64)
+ // GetBucketInfo returns the BucketInfo of the challenged piece.
+ GetBucketInfo() *storagetypes.BucketInfo
+ // SetBucketInfo sets the BucketInfo of the challenged piece.
+ SetBucketInfo(*storagetypes.BucketInfo)
+ // GetUserAddress returns the user account challenging the object.
+ // It is used to record the read bucket information.
+ GetUserAddress() string
+ // SetUserAddress sets the user account challenging the object.
+ SetUserAddress(string)
+ // GetSegmentIdx returns the segment index of the challenged piece.
+ GetSegmentIdx() uint32
+ // SetSegmentIdx sets the segment index of the challenged piece.
+ SetSegmentIdx(uint32)
+ // GetRedundancyIdx returns the redundancy index of the challenged piece.
+ GetRedundancyIdx() int32
+ // SetRedundancyIdx sets the redundancy index of the challenged piece.
+ SetRedundancyIdx(idx int32)
+ // GetIntegrityHash returns the integrity hash of the object.
+ GetIntegrityHash() []byte
+ // SetIntegrityHash sets the integrity hash of the object.
+ SetIntegrityHash([]byte)
+ // GetPieceHash returns the hashes of the challenged piece.
+ GetPieceHash() [][]byte
+ // SetPieceHash sets the hashes of the challenged piece.
+ SetPieceHash([][]byte)
+ // GetPieceDataSize returns the data size of the challenged piece.
+ GetPieceDataSize() int64
+ // SetPieceDataSize sets the data size of the challenged piece.
+ SetPieceDataSize(int64)
+}
+```
+
+### GC Task
+
+GCTask is an abstract interface to record the information of garbage collection.
+
+```go
+type GCTask interface {
+ Task
+}
+```
+
+#### GCObjectTask
+
+The GCObjectTask is an abstract interface to record the information for collecting piece store space by deleting payload
+data of objects that have been deleted on the Greenfield chain.
+
+```go
+type GCObjectTask interface {
+ GCTask
+ // InitGCObjectTask inits the GCObjectTask.
+ InitGCObjectTask(priority TPriority, start, end uint64, timeout int64)
+ // SetStartBlockNumber sets the start block number for collecting objects.
+ SetStartBlockNumber(uint64)
+ // GetStartBlockNumber returns the start block number for collecting objects.
+ GetStartBlockNumber() uint64
+ // SetEndBlockNumber sets the end block number for collecting objects.
+ SetEndBlockNumber(uint64)
+ // GetEndBlockNumber returns the end block number for collecting objects.
+ GetEndBlockNumber() uint64
+ // SetCurrentBlockNumber sets the collecting block number.
+ SetCurrentBlockNumber(uint64)
+ // GetCurrentBlockNumber returns the collecting block number.
+ GetCurrentBlockNumber() uint64
+ // GetLastDeletedObjectId returns the last deleted ObjectID.
+ GetLastDeletedObjectId() uint64
+ // SetLastDeletedObjectId sets the last deleted ObjectID.
+ SetLastDeletedObjectId(uint64)
+ // GetGCObjectProgress returns the progress of collecting objects: the
+ // deleting block number and the last deleted object id.
+ GetGCObjectProgress() (uint64, uint64)
+ // SetGCObjectProgress sets the progress of collecting objects; the params stand for
+ // the deleting block number and the last deleted object id.
+ SetGCObjectProgress(uint64, uint64)
+}
+```
+
+#### GCZombiePieceTask
+
+The GCZombiePieceTask is an abstract interface to record the information for collecting piece store space by deleting zombie
+piece data that results from any exception where the piece data meta is not on the chain but the pieces have been stored in the piece store.
+
+```go
+type GCZombiePieceTask interface {
+ GCTask
+ // GetGCZombiePieceStatus returns the status of collecting zombie pieces: the
+ // last deleted object id and the number of deleted pieces.
+ GetGCZombiePieceStatus() (uint64, uint64)
+ // SetGCZombiePieceStatus sets the status of collecting zombie pieces; the params
+ // stand for the last deleted object id and the number of deleted pieces.
+ SetGCZombiePieceStatus(uint64, uint64)
+}
+```
+
+#### GCMetaTask
+
+The GCMetaTask is an abstract interface to record the information for collecting SP meta store space by deleting expired data.
+
+```go
+type GCMetaTask interface {
+ GCTask
+ // GetGCMetaStatus returns the status of collecting metadata: the last
+ // deleted object id and the number of deleted entries.
+ GetGCMetaStatus() (uint64, uint64)
+ // SetGCMetaStatus sets the status of collecting metadata; the params stand for the last
+ // deleted object id and the number of deleted entries.
+ SetGCMetaStatus(uint64, uint64)
+}
+```
+
+## Task Type Definition
+
+```go
+// TType is an enum type; it defines the type of task.
+type TType int32
+
+const (
+ // TypeTaskUnknown defines the default task type.
+ TypeTaskUnknown TType = iota
+ // TypeTaskCreateBucketApproval defines the type of asking create bucket approval
+ // from the primary SP task
+ TypeTaskCreateBucketApproval
+ // TypeTaskCreateObjectApproval defines the type of asking create object approval
+ // from the primary SP task
+ TypeTaskCreateObjectApproval
+ // TypeTaskReplicatePieceApproval defines the type of asking replicate piece approval
+ // from secondary SPs task
+ TypeTaskReplicatePieceApproval
+ // TypeTaskUpload defines the type of uploading object to primary SP task.
+ TypeTaskUpload
+ // TypeTaskReplicatePiece defines the type of replicating pieces to secondary SPs task.
+ TypeTaskReplicatePiece
+ // TypeTaskSealObject defines the type of sealing object to the chain task.
+ TypeTaskSealObject
+ // TypeTaskReceivePiece defines the type of receiving pieces for secondary SP task.
+ TypeTaskReceivePiece
+ // TypeTaskDownloadObject defines the type of downloading object task.
+ TypeTaskDownloadObject
+ // TypeTaskDownloadPiece defines the type of downloading piece task.
+ TypeTaskDownloadPiece
+ // TypeTaskChallengePiece defines the type of challenging piece task.
+ TypeTaskChallengePiece
+ // TypeTaskGCObject defines the type of collecting object payload data task.
+ TypeTaskGCObject
+ // TypeTaskGCZombiePiece defines the type of collecting zombie piece task.
+ TypeTaskGCZombiePiece
+ // TypeTaskGCMeta defines the type of collecting SP metadata task.
+ TypeTaskGCMeta
+)
+```
+
+## Task Priority
+
+Each type of task has a priority. The range of priority is [0, 255]; the higher the priority, the higher the urgency to
+be executed, and the greater the probability of being executed by priority scheduling.
+
+```go
+type TPriority uint8
+
+const (
+ // UnKnownTaskPriority defines the default task priority.
+ UnKnownTaskPriority TPriority = 0
+ // UnSchedulingPriority defines the task priority that should never be scheduled.
+ UnSchedulingPriority TPriority = 0
+ // MaxTaskPriority defines the max task priority.
+ MaxTaskPriority TPriority = 255
+ // DefaultLargerTaskPriority defines the larger task priority.
+ DefaultLargerTaskPriority TPriority = 170
+ // DefaultSmallerPriority defines the smaller task priority.
+ DefaultSmallerPriority TPriority = 85
+)
+```
+
+## Task Priority Level
+
+Task priority is divided into three levels: TLowPriorityLevel, TMediumPriorityLevel and THighPriorityLevel. The TLowPriorityLevel
+default priority range is [0, 85), the TMediumPriorityLevel default priority range is [85, 170), and the THighPriorityLevel
+default priority range is [170, 256). When allocating task execution resources from ResourceManager, resources
+are allocated according to task priority level rather than task priority: task priority has up to 256 levels, so the task priority
+level makes resource management easier.
+
+```text
+Example:
+ the resource limit configuration of task execution node :
+ [TasksHighPriority: 30, TasksMediumPriority: 20, TasksLowPriority: 2]
+ the executor of the task can run 30 high level tasks at the same time that the
+ task priority between [170, 255]
+ the executor of the task can run 20 medium level tasks at the same time that the
+ task priority between [85, 170)
+ the executor of the task can run 2 low level tasks at the same time that the
+ task priority < 85
+```
+
+```go
+type TPriorityLevel int32
+
+const (
+ // TLowPriorityLevel defines the low task priority level.
+ TLowPriorityLevel TPriorityLevel = iota
+ // TMediumPriorityLevel defines the medium task priority level.
+ TMediumPriorityLevel
+ // THighPriorityLevel defines the high task priority level.
+ THighPriorityLevel
+)
+```
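+
+For illustration, a sketch of how a priority could be mapped to its level under the default ranges above (the helper name is hypothetical):
+
+```go
+// PriorityToLevel maps a task priority to its priority level using the
+// default range boundaries; illustrative only.
+func PriorityToLevel(p TPriority) TPriorityLevel {
+	switch {
+	case p >= DefaultLargerTaskPriority: // [170, 255]
+		return THighPriorityLevel
+	case p >= DefaultSmallerPriority: // [85, 170)
+		return TMediumPriorityLevel
+	default: // [0, 85)
+		return TLowPriorityLevel
+	}
+}
+```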
+
+## Task Init
+
+Each task needs to call its InitXXXTask method before use. This method requires passing in the necessary parameters of
+each type of task. These parameters will not be changed in most cases and are necessary, such as task priority, timeout,
+max retries, and necessary information for resource estimation.
+
+Any changes to initialization parameters during task execution may cause unpredictable consequences. For example, changes
+in parameters that affect resource estimation may cause OOM, etc.
diff --git a/docs/modules/downloader.md b/docs/modules/downloader.md
new file mode 100644
index 000000000..bc830f159
--- /dev/null
+++ b/docs/modules/downloader.md
@@ -0,0 +1,108 @@
+# Downloader
+
+Downloader is responsible for downloading object data (including range downloads) and challenge pieces. For the Downloader workflow, users can refer to [Downloader](../workflow/workflow.md#downloader). We currently abstract the SP as the GfSp framework, which provides users with customizable capabilities to meet their specific requirements. The Downloader module provides an abstract interface, called `Downloader`, as follows:
+
+Downloader is an abstract interface to handle getting object requests from users' accounts and getting challenge info requests from other components in the system.
+
+```go
+type Downloader interface {
+ Modular
+ // PreDownloadObject prepares to handle DownloadObject; it can do some checks
+ // such as checking for duplicates, whether the SP's limitation has been reached, etc.
+ PreDownloadObject(ctx context.Context, task task.DownloadObjectTask) error
+ // HandleDownloadObjectTask handles the DownloadObject and gets data from the piece store.
+ HandleDownloadObjectTask(ctx context.Context, task task.DownloadObjectTask) ([]byte, error)
+ // PostDownloadObject is called after HandleDownloadObjectTask; it can recycle
+ // resources, make statistics and do some other operations.
+ PostDownloadObject(ctx context.Context, task task.DownloadObjectTask)
+
+ // PreDownloadPiece prepares to handle DownloadPiece; it can do some checks such as checking for duplicates,
+ // whether the SP's limitation has been reached, etc.
+ PreDownloadPiece(ctx context.Context, task task.DownloadPieceTask) error
+ // HandleDownloadPieceTask handles the DownloadPiece and gets data from the piece store.
+ HandleDownloadPieceTask(ctx context.Context, task task.DownloadPieceTask) ([]byte, error)
+ // PostDownloadPiece is called after HandleDownloadPieceTask; it can recycle
+ // resources, make statistics and do some other operations.
+ PostDownloadPiece(ctx context.Context, task task.DownloadPieceTask)
+
+ // PreChallengePiece prepares to handle ChallengePiece; it can do some checks
+ // such as checking for duplicates, whether the SP's limitation has been reached, etc.
+ PreChallengePiece(ctx context.Context, task task.ChallengePieceTask) error
+ // HandleChallengePiece handles ChallengePiece, gets piece data from the piece store and gets the integrity hash from the db.
+ HandleChallengePiece(ctx context.Context, task task.ChallengePieceTask) ([]byte, [][]byte, []byte, error)
+ // PostChallengePiece is called after HandleChallengePiece; it can recycle resources, make statistics
+ // and do some other operations.
+ PostChallengePiece(ctx context.Context, task task.ChallengePieceTask)
+ // QueryTasks queries download/challenge tasks running on the downloader by task sub-key.
+ QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
+}
+```
+
+The Downloader interface inherits the [Modular interface](./lifecycle_modular.md#modular-interface), so the Downloader module can be managed by lifecycle and resource manager. In terms of the functions provided by the Downloader module, it can be divided into three parts: DownloadObject, DownloadPiece and ChallengePiece. They all have three methods: PreXXX, HandleXXX and PostXXX. Therefore, you can rewrite these methods to meet your own requirements, as shown in the sketch below. As we can see from the second parameter of the methods defined in the `Downloader` interface, DownloadObject is split into `DownloadObjectTask`, DownloadPiece is split into `DownloadPieceTask` and ChallengePiece is split into `ChallengePieceTask`. They are also defined as interfaces. We can query the DownloadObject, DownloadPiece and ChallengePiece tasks that we care about via the `QueryTasks` method using a subKey.
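+
+As an illustration, a customized SP could embed a default implementation and override selected hooks; the names below (`CustomDownloader`, `maxDownloadSize`) are hypothetical:
+
+```go
+// CustomDownloader overrides PreDownloadObject to add an extra size check
+// before falling back to the embedded default behavior.
+type CustomDownloader struct {
+	Downloader // an existing Downloader implementation
+}
+
+func (d *CustomDownloader) PreDownloadObject(ctx context.Context, t task.DownloadObjectTask) error {
+	const maxDownloadSize = 64 << 20 // hypothetical 64 MB cap
+	if t.GetSize() > maxDownloadSize {
+		return errors.New("download size exceeds the limit")
+	}
+	return d.Downloader.PreDownloadObject(ctx, t)
+}
+```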
+
+## ObjectTask
+
+DownloadObjectTask, DownloadPieceTask and ChallengePieceTask all inherit the `ObjectTask` interface. An ObjectTask is associated with an object and storage params and records information about the object's different stages. Considering that storage params can change on Greenfield, the storage params of each object should be determined when it is created, and they should not be queried during the task flow, which would be inefficient and error-prone.
+
+The ObjectTask interface definition is shown below:
+
+- [ObjectTask](./common/task.md#objecttask)
+
+You can override all these methods on your own.
+
+The corresponding protobuf definition is shown below:
+
+- [ObjectInfo](./common/proto.md#objectinfo-proto)
+- [Params](./common/proto.md#params-proto)
+
+## DownloadObjectTask
+
+DownloadObjectTask is an abstract interface to record the information for downloading pieces of object payload data. DownloadObjectTask inherits the ObjectTask interface and also defines seven methods to help query info or set data:
+
+- [ObjectTask](./common/task.md#objecttask)
+- [DownloadObjectTask](./common/task.md#downloadobjecttask)
+
+You can override all these methods on your own.
+
+The corresponding protobuf definition is shown below:
+
+- [GfSpDownloadObjectTask](./common/proto.md#gfspdownloadobjecttask-proto)
+- [BucketInfo](./common/proto.md#bucketinfo-proto)
+
+## DownloadPieceTask
+
+DownloadPieceTask is an abstract interface to record the information for downloading piece data. DownloadPieceTask inherits the ObjectTask interface and also defines ten methods to help query info or set data:
+
+- [ObjectTask](./common/task.md#objecttask)
+- [DownloadPieceTask](./common/task.md#downloadpiecetask)
+
+You can override all these methods on your own.
+
+The corresponding protobuf definition is shown below:
+
+- [GfSpDownloadPieceTask](./common/proto.md#gfspdownloadpiecetask-proto)
+- [BucketInfo](./common/proto.md#bucketinfo-proto)
+- [ObjectInfo](./common/proto.md#objectinfo-proto)
+- [Params](./common/proto.md#params-proto)
+
+## ChallengePieceTask
+
+It is always the first priority of any decentralized storage network to guarantee data integrity and availability. We use data challenges instead of storage proofs to get better HA. There will continuously be data challenges to random pieces on the Greenfield chain, and the SP that stores the challenged piece uses the challenge workflow to respond. Each SP splits the object payload data into segments, stores the segment data in the piece store and stores the segment checksums in the SP DB.
+
+ChallengePieceTask is an abstract interface to record the information for getting challenge piece info; the validator gets challenge info to confirm whether the SP stores the user's data correctly.
+
+- [ObjectTask](./common/task.md#objecttask)
+- [ChallengePieceTask](./common/task.md#challengepiecetask)
+
+ChallengePieceTask defines 15 methods to help query info or set data. You can override all these methods on your own.
+
+The corresponding protobuf definition is shown below:
+
+- [GfSpChallengePieceTask](./common/proto.md#gfspchallengepiecetask-proto)
+- [BucketInfo](./common/proto.md#bucketinfo-proto)
+- [ObjectInfo](./common/proto.md#objectinfo-proto)
+- [Params](./common/proto.md#params-proto)
+
+## GfSp Framework Downloader Code
+
+Downloader module code implementation: [Downloader](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/modular/downloader)
diff --git a/docs/modules/gateway.md b/docs/modules/gateway.md
new file mode 100644
index 000000000..7480c833b
--- /dev/null
+++ b/docs/modules/gateway.md
@@ -0,0 +1,84 @@
+# Gateway
+
+The Gateway service serves as the unified entry point of HTTP requests for the SP, providing a standardized `HTTP RESTful API` for application programming.
+If you are interested in the HTTP RESTful API, see the [following page](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/).
+
+## Overview
+
+*Figure: Gateway Architecture*
+
+### Authorization Checker
+
+Gateway provides unified authorization for each HTTP request from the following three aspects:
+
+- Verifies the signature of the request to ensure that the request has not been tampered with.
+- Checks the authorization to ensure the corresponding account has permissions on the resources.
+- Checks the object state and payment account state to ensure the object is sealed and the payment account is active.
+
+### Request Router
+
+Based on the specific request type, a request is routed to the corresponding backend microservice.
+
+### Flow Control
+
+Based on the configured flow control policies, flow control is performed to provide higher-quality service and avoid service overload.
+
+### Load Balancer (LB)
+
+In the future, the SP Gateway will use an LB to route traffic to the backend microservices in the SP. LB is a method of distributing API request traffic across multiple upstream services. LB improves overall system responsiveness and reduces failures by preventing the overloading of individual resources.
+
+### Middleware
+
+The SP Gateway uses middleware to collect metrics, log requests, register metadata and so on.
+
+### Universal Endpoint
+
+We implement the Universal Endpoint according to the [Greenfield Whitepaper Universal Endpoint](https://github.com/bnb-chain/greenfield-whitepaper/blob/main/part3.md#231-universal-endpoint).
+
+All objects can be identified and accessed via a universal path: `gnfd://<bucket_name>/<object_name>?[parameter]*`
+
+Explanation:
+
+- The beginning identifier `gnfd://` is mandatory and cannot be changed.
+- `bucket_name` is the bucket name of the object and is mandatory.
+- `object_name` is the name of the object and is mandatory.
+- The parameter is an optional list of key-value pairs that provide additional information for the URI.
+
+Each SP registers multiple endpoints to access its services, e.g. "SP1" may ask its users to download objects via https://gnfd-testnet-sp-1.bnbchain.org/download.
+The full download RESTful API would then look like: https://gnfd-testnet-sp-1.bnbchain.org/download/mybucket/myobject.jpg.
+
+Universal Endpoint supports using any valid endpoint of any SP, and automatically redirects to the correct endpoint containing the object for downloading.
+
+For instance, when users access the testnet endpoint `gnfd-testnet-sp-1.bnbchain.org` of SP1, the request URL will be https://gnfd-testnet-sp-1.bnbchain.org/download/mybucket/myobject.jpg. Universal Endpoint finds the correct endpoint for myobject.jpg, here SP3, and redirects the user to https://gnfd-testnet-sp-3.bnbchain.org/download/mybucket/myobject.jpg to download the file.
+*Figure: Universal Endpoint Logic Flow*
+
+#### Download File
+
+If you want to download a file using the Universal Endpoint, the download URL looks like https://gnfd-testnet-sp-1.bnbchain.org/download/mybucket/myobject.jpg. Downloading is enforced by adding this header to the HTTP response:
+
+```text
+Content-Disposition=attachment
+```
+
+#### View File
+
+If you want to view a file using the Universal Endpoint, the view URL looks like https://gnfd-testnet-sp-1.bnbchain.org/view/mybucket/myobject.jpg. Viewing is enforced by adding this header to the HTTP response:
+
+```text
+Content-Disposition=inline
+```
+
+#### Public File Access
+
+Public files can be downloaded or viewed with the following points to note:
+
+1. The downloader's/viewer's quota will not be deducted, but the object owner's quota will be deducted per download or view.
+2. If a file's public or private status is not specified, whether it can be downloaded or viewed as a public or private file is determined by the status of the bucket it resides in.
+3. If a file is not sealed, it cannot be downloaded or viewed.
+
+#### Private File Access
+
+Private file access is being designed and will be provided in the next few releases. Currently, if you try to download or view a private file, an error is returned to let you know the object key you are using is illegal.
diff --git a/docs/modules/manager.md b/docs/modules/manager.md
new file mode 100644
index 000000000..23bdf93ab
--- /dev/null
+++ b/docs/modules/manager.md
@@ -0,0 +1,60 @@
+# Manager
+
+Manager is responsible for task scheduling, such as dispatching tasks to the TaskExecutor module, GC object tasks, GC zombie piece tasks and so on. The Manager module is therefore somewhat similar to a daemon process that takes care of background work. For the Manager workflow, users can refer to [Manager](../workflow/workflow.md#uploader). We currently abstract the SP as the GfSp framework, which provides users with customizable capabilities to meet their specific requirements. The Manager module provides an abstract interface, called `Manager`, as follows:
+
+Manager is an abstract interface for internal service management; it is responsible for task scheduling and other management duties of the SP.
+
+```go
+type Manager interface {
+    Modular
+    // DispatchTask dispatches tasks to the TaskExecutor module when it asks for tasks.
+    // It will consider the remaining resources when dispatching tasks.
+    DispatchTask(ctx context.Context, limit rcmgr.Limit) (task.Task, error)
+    // QueryTasks queries tasks held on the manager by task sub-key.
+    QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
+    // HandleCreateUploadObjectTask handles the CreateUploadObject request from the Uploader. Before the Uploader
+    // handles a user's UploadObject request, it sends a CreateUploadObject request to the Manager to ask if it's ok.
+    // Through this interface the SP implements its global object-uploading strategy.
+    //
+    // For example: control the concurrency of global uploads, avoid repeated uploads, rate control, etc.
+    HandleCreateUploadObjectTask(ctx context.Context, task task.UploadObjectTask) error
+    // HandleDoneUploadObjectTask handles the result of uploading object payload data to the primary SP; the Manager
+    // generates a ReplicatePieceTask for the TaskExecutor to run.
+    HandleDoneUploadObjectTask(ctx context.Context, task task.UploadObjectTask) error
+    // HandleReplicatePieceTask handles the result of replicating piece data to the secondary SPs;
+    // the request comes from the TaskExecutor.
+    HandleReplicatePieceTask(ctx context.Context, task task.ReplicatePieceTask) error
+    // HandleSealObjectTask handles the result of sealing the object on Greenfield; the request comes from the TaskExecutor.
+    HandleSealObjectTask(ctx context.Context, task task.SealObjectTask) error
+    // HandleReceivePieceTask handles the result of a receive piece task. The request comes from the Receiver, which
+    // reports a completed ReceivePieceTask to the Manager, and from the TaskExecutor, which reports the result of
+    // confirming whether the object synced to the secondary SP has been sealed.
+    HandleReceivePieceTask(ctx context.Context, task task.ReceivePieceTask) error
+    // HandleGCObjectTask handles the GCObjectTask; the request comes from the TaskExecutor.
+    HandleGCObjectTask(ctx context.Context, task task.GCObjectTask) error
+    // HandleGCZombiePieceTask handles the GCZombiePieceTask; the request comes from the TaskExecutor.
+    HandleGCZombiePieceTask(ctx context.Context, task task.GCZombiePieceTask) error
+    // HandleGCMetaTask handles the GCMetaTask; the request comes from the TaskExecutor.
+    HandleGCMetaTask(ctx context.Context, task task.GCMetaTask) error
+    // HandleDownloadObjectTask handles the DownloadObjectTask; the request comes from the Downloader.
+    HandleDownloadObjectTask(ctx context.Context, task task.DownloadObjectTask) error
+    // HandleChallengePieceTask handles the ChallengePieceTask; the request comes from the Downloader.
+    HandleChallengePieceTask(ctx context.Context, task task.ChallengePieceTask) error
+}
+```
+
+The Manager interface inherits the [Modular interface](./lifecycle_modular.md#modular-interface), so the Manager module can be managed by the lifecycle and resource manager.
+
+In terms of the functions provided by the Manager module, there are multiple task handlers that do a lot of chores. In general, the tasks handled by the Manager module can be divided into `UploadObjectTask`, `ReplicatePieceTask`, `SealObjectTask`, `ReceivePieceTask`, `GCObjectTask`, `GCZombieTask`, `DownloadObjectTask` and `ChallengePieceTask`. In addition, it also provides `DispatchTask` and `QueryTasks`. The tasks handled by the TaskExecutor module are dispatched from the Manager module. We can query the tasks that we care about with the `QueryTasks` method by using a sub-key.
+
+The Manager module only schedules ReplicatePiece and SealObject, which are background tasks. For foreground tasks, the Manager module just responds and doesn't schedule them. As we can see from the second parameter of the methods defined in the `Manager` interface, different requests are split into different tasks, which are also defined as interfaces.
+
+## DispatchTask
+
+The Manager module dispatches tasks to the TaskExecutor. When dispatching tasks, the Manager module should take resource limits into account to prevent resources from being exhausted. So the second parameter of DispatchTask is rcmgr.Limit, an interface that specifies basic resource limits for allocating system resources.
+
+- [Limit](./common/lifecycle_modular.md#limit)
+
+## GfSp Framework Manager Code
+
+Manager module code implementation: [Manager](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/modular/manager)
diff --git a/docs/modules/p2p.md b/docs/modules/p2p.md
new file mode 100644
index 000000000..8e4679e5e
--- /dev/null
+++ b/docs/modules/p2p.md
@@ -0,0 +1,57 @@
+# P2P
+
+P2P networks are decentralized, meaning participants communicate directly with one another on a relative “equal footing.” No central server or authority controls the network.
+P2P networks do not require a privileged set of “servers” that behave differently from their “clients,” as in the predominant client-server model.
+
+P2P networks can take many forms, including file-sharing systems like BitTorrent, blockchain networks like Bitcoin and Ethereum, and decentralized communication standards like Matrix. These systems all have different challenges and tradeoffs, but they share the goal of improving upon the traditional client-server networking model.
+
+GfSp uses [libp2p](https://github.com/libp2p/go-libp2p) to implement its p2p functions. If you are interested in p2p, you can refer to libp2p for more information.
+
+The SP abandoned the conventional p2p ping service because it is managed and communicates in units of p2p nodes, without considering fairness between SPs. The SP's p2p service is more inclined to synchronize messages between different SPs than between p2p nodes, so we customized the ping service for this purpose.
+
+The customized ping service implements dynamic updates of the permanent p2p nodes. Permanent nodes should cover as many SPs as possible, which is more decentralized. For example, a get-replicate-approval request needs at least six replies from different SPs, but p2p nodes going offline and being replaced is an inevitable problem: if all nodes belonging to the same SP fail and are replaced, that SP becomes unable to communicate. This requires dynamically updating the permanent nodes.
+
+The customized ping service also implements a pruning function for permanent nodes. For zombie nodes, the pruning strategy takes the SP dimension into account and is very conservative: nodes are only pruned if the SP has enough backups and multiple interactions have failed. This keeps enough connectable nodes for each SP, so that every SP has an equal opportunity to receive requests.
+
+We define a P2P interface to let you customize your own functions:
+
+```go
+// P2P is an abstract interface to handle replicate piece approvals between SPs.
+type P2P interface {
+    Modular
+    // HandleReplicatePieceApproval handles an ask-replicate-piece-approval request. It broadcasts
+    // the approval to other SPs and waits for their responses. If the minimum approved number or
+    // the maximum approved number is reached before the timeout, it returns the approvals.
+    HandleReplicatePieceApproval(ctx context.Context, task task.ApprovalReplicatePieceTask, min, max int32,
+        timeout int64) ([]task.ApprovalReplicatePieceTask, error)
+    // HandleQueryBootstrap handles querying the p2p node's bootstrap node info.
+    HandleQueryBootstrap(ctx context.Context) ([]string, error)
+    // QueryTasks queries replicate piece approval tasks running on p2p by task sub-key.
+    QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
+}
+```
+
+The P2P interface inherits the [Modular interface](./common/lifecycle_modular.md#modular-interface), so the P2P module can be managed by the lifecycle and resource manager.
+
+In terms of the functions provided by the P2P module, there are three methods: HandleReplicatePieceApproval, HandleQueryBootstrap and QueryTasks.
+
+As we can see from the second parameter of `HandleReplicatePieceApproval` defined in the `P2P` interface, there is a replicate piece approval task, which is also defined as an interface.
+
+We can query the replicate piece approval tasks that we care about with the `QueryTasks` method by using a sub-key. A usage sketch follows.
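+
+The snippet below is a hypothetical helper, not GfSp code, showing how a primary SP might call `HandleReplicatePieceApproval`. The concrete values (6/12 approvals, a timeout of 10, assumed to be seconds) are illustrative assumptions.
+
+```go
+// askReplicateApprovals broadcasts the signed approval task and waits until at
+// least minApprovals secondary SPs answer, or the timeout expires.
+func askReplicateApprovals(ctx context.Context, p2p P2P,
+    approvalTask task.ApprovalReplicatePieceTask) ([]task.ApprovalReplicatePieceTask, error) {
+    const (
+        minApprovals = int32(6)  // e.g. enough secondary SPs for EC replication
+        maxApprovals = int32(12) // stop waiting early once this many answered
+        timeout      = int64(10) // illustrative timeout value
+    )
+    approvals, err := p2p.HandleReplicatePieceApproval(ctx, approvalTask, minApprovals, maxApprovals, timeout)
+    if err != nil {
+        return nil, err
+    }
+    // Each returned task carries a secondary SP's signature and expired height.
+    return approvals, nil
+}
+```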
+
+## ApprovalReplicatePieceTask
+
+ApprovalReplicatePieceTask is an abstract interface that records the request to replicate pieces to other SPs (which act as secondary SPs for the object). It is initiated by the primary SP in the replicate pieces phase. Before sending it to other SPs, the primary SP signs the task so that the other SPs can verify it was sent by a legitimate SP. If another SP grants the approval, it sets the expired height via SetExpiredHeight and signs the ApprovalReplicatePieceTask.
+
+- [ApprovalReplicatePieceTask](./common/task.md#approvalreplicatepiecetask)
+- [ApprovalTask](./common/task.md#approvaltask)
+- [ObjectTask](./common/task.md#objecttask)
+
+The corresponding protobuf definition is shown below:
+
+- [GfSpReplicatePieceApprovalTask](./common/proto.md#gfspreplicatepieceapprovaltask-proto)
+- [ObjectInfo](./common/proto.md#objectinfo-proto)
+- [Params](./common/proto.md#params-proto)
+
+## GfSp Framework P2P Code
+
+P2P module code implementation: [P2P](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/modular/p2p)
diff --git a/docs/modules/receiver.md b/docs/modules/receiver.md
new file mode 100644
index 000000000..bb57bdfa6
--- /dev/null
+++ b/docs/modules/receiver.md
@@ -0,0 +1,45 @@
+# Receiver
+
+Receiver is responsible for accepting the piece data that is replicated from the primary SP. When receiving piece data from the primary SP, the piece data passes through the Gateway and is then transferred to the Receiver module via gRPC.
+
+Receiver is an abstract interface to receive piece data from the primary SP.
+
+```go
+type Receiver interface {
+    Modular
+    // HandleReceivePieceTask stores piece data into the secondary SP.
+    HandleReceivePieceTask(ctx context.Context, task task.ReceivePieceTask, data []byte) error
+    // HandleDoneReceivePieceTask calculates the integrity hash of the object and signs it, returning them
+    // to the primary SP for sealing the object.
+    HandleDoneReceivePieceTask(ctx context.Context, task task.ReceivePieceTask) ([]byte, []byte, error)
+    // QueryTasks queries replicate piece tasks running on the receiver by task sub-key.
+    QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
+}
+```
+
+The Receiver interface inherits the [Modular interface](./common/lifecycle_modular.md#modular-interface), so the Receiver module can be managed by the lifecycle and resource manager.
+
+In terms of the functions provided by the Receiver module, there is only one part: receiving piece data. There are three methods in the Receiver interface, and you can rewrite them to meet your own requirements.
+
+As we can see from the second parameter of the methods defined in the `Receiver` interface, ReceivePiece is split into `ReceivePieceTask`, which is also defined as an interface.
+
+We can query the ReceivePiece tasks that we care about with the `QueryTasks` method by using a sub-key.
+
+## ReceivePieceTask
+
+The corresponding interface definitions are shown below:
+
+- [ReceivePieceTask](./common/task.md#receivepiecetask)
+- [ObjectTask](./common/task.md#objecttask)
+
+ObjectTask inherits the [Task interface](./common/task.md#task). ReceivePieceTask also defines 14 methods to help query info or set data. You can overwrite all these methods to meet your own requirements. A sketch of the receive flow follows.
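+
+The snippet below is a hypothetical sketch, not GfSp code, of the receive flow seen from the primary SP's side: stream every piece with `HandleReceivePieceTask`, then call `HandleDoneReceivePieceTask` to obtain the secondary SP's integrity hash and signature used later for sealing.
+
+```go
+// replicateToSecondary drives a Receiver for one replicate index of an object.
+func replicateToSecondary(ctx context.Context, receiver Receiver,
+    t task.ReceivePieceTask, pieces [][]byte) (integrityHash, signature []byte, err error) {
+    for _, piece := range pieces {
+        // In the real flow the task tracks the current piece index; omitted here.
+        if err = receiver.HandleReceivePieceTask(ctx, t, piece); err != nil {
+            return nil, nil, err
+        }
+    }
+    // The returned hash and signature go back to the primary SP for sealing.
+    return receiver.HandleDoneReceivePieceTask(ctx, t)
+}
+```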
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpReceivePieceTask](./common/proto.md#gfspreceivepiecetask-proto)
+- [ObjectInfo](./common/proto.md#objectinfo-proto)
+- [Params](./common/proto.md#params-proto)
+
+## GfSp Framework Receiver Code
+
+Receiver module code implementation: [Receiver](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/modular/receiver)
diff --git a/docs/modules/sginer.md b/docs/modules/sginer.md
new file mode 100644
index 000000000..1b9d54371
--- /dev/null
+++ b/docs/modules/sginer.md
@@ -0,0 +1,101 @@
+# Signer
+
+Signer uses the SP's private keys to sign messages: it assembles messages into a transaction and signs the transaction before broadcasting it to the Greenfield blockchain, and it also signs the messages exchanged between SPs.
+
+Signer is an abstract interface to handle signing for the SP and its operator on the Greenfield chain. It holds all the private keys of one SP. Considering the SP account's sequence number, it must be a singleton.
+
+```go
+type Signer interface {
+    Modular
+    // SignCreateBucketApproval signs the MsgCreateBucket for asking create bucket approval.
+    SignCreateBucketApproval(ctx context.Context, bucket *storagetypes.MsgCreateBucket) ([]byte, error)
+    // SignCreateObjectApproval signs the MsgCreateObject for asking create object approval.
+    SignCreateObjectApproval(ctx context.Context, task *storagetypes.MsgCreateObject) ([]byte, error)
+    // SignReplicatePieceApproval signs the ApprovalReplicatePieceTask for asking to replicate pieces to secondary SPs.
+    SignReplicatePieceApproval(ctx context.Context, task task.ApprovalReplicatePieceTask) ([]byte, error)
+    // SignReceivePieceTask signs the ReceivePieceTask for replicating piece data between SPs.
+    SignReceivePieceTask(ctx context.Context, task task.ReceivePieceTask) ([]byte, error)
+    // SignIntegrityHash signs the integrity hash of an object for sealing the object.
+    SignIntegrityHash(ctx context.Context, objectID uint64, hash [][]byte) ([]byte, []byte, error)
+    // SignP2PPingMsg signs the ping msg for p2p node probing.
+    SignP2PPingMsg(ctx context.Context, ping *gfspp2p.GfSpPing) ([]byte, error)
+    // SignP2PPongMsg signs the pong msg for p2p to respond to a ping msg.
+    SignP2PPongMsg(ctx context.Context, pong *gfspp2p.GfSpPong) ([]byte, error)
+    // SealObject signs the MsgSealObject and broadcasts the tx to Greenfield.
+    SealObject(ctx context.Context, object *storagetypes.MsgSealObject) error
+    // RejectUnSealObject signs the MsgRejectSealObject and broadcasts the tx to Greenfield.
+    RejectUnSealObject(ctx context.Context, object *storagetypes.MsgRejectSealObject) error
+    // DiscontinueBucket signs the MsgDiscontinueBucket and broadcasts the tx to Greenfield.
+    DiscontinueBucket(ctx context.Context, bucket *storagetypes.MsgDiscontinueBucket) error
+}
+```
+
+The Signer interface inherits the [Modular interface](./common/lifecycle_modular.md#modular-interface), so the Signer module can be managed by the lifecycle and resource manager.
+
+In terms of the functions provided by the Signer module, there are ten methods. You can rewrite these methods to meet your own requirements. A sealing sketch follows.
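+
+The snippet below is a hypothetical sketch, not GfSp code, of how sealing might use the Signer: sign the integrity hash over the piece checksums first, then broadcast the MsgSealObject. How the hash and signature are wired into the message is omitted here.
+
+```go
+// sealWithSigner signs the object's integrity hash and then seals the object on chain.
+func sealWithSigner(ctx context.Context, signer Signer, objectID uint64,
+    pieceChecksums [][]byte, msg *storagetypes.MsgSealObject) error {
+    integrityHash, signature, err := signer.SignIntegrityHash(ctx, objectID, pieceChecksums)
+    if err != nil {
+        return err
+    }
+    _, _ = integrityHash, signature // recorded into the seal message in the real flow
+    // SealObject signs the transaction with the seal key and broadcasts it to the chain.
+    return signer.SealObject(ctx, msg)
+}
+```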
+
+## SignCreateBucketApproval
+
+The corresponding `protobuf` definition is shown below:
+
+- [MsgCreateBucket](./common/proto.md#msgcreatebucket-proto)
+
+## SignCreateObjectApproval
+
+The corresponding `protobuf` definition is shown below:
+
+- [MsgCreateObject](./common/proto.md#msgcreateobject-proto)
+
+## SignReplicatePieceApproval
+
+The second parameter of SignReplicatePieceApproval is a task interface; the corresponding interface definition is shown below:
+
+- [ApprovalReplicatePieceTask](./common/task.md#approvalreplicatepiecetask)
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpReplicatePieceApprovalTask](./common/proto.md#gfspreplicatepieceapprovaltask-proto)
+
+## SignReceivePieceTask
+
+The second parameter of SignReceivePieceTask is a task interface; the corresponding interface definition is shown below:
+
+- [ReceivePieceTask](./common/task.md#receivepiecetask)
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpReceivePieceTask](./common/proto.md#gfspreceivepiecetask-proto)
+
+## SignP2PPingMsg
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpPing](./common/proto.md#gfspping-proto)
+
+## SignP2PPongMsg
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpPong](./common/proto.md#gfsppong-proto)
+
+## SealObject
+
+The corresponding `protobuf` definition is shown below:
+
+- [MsgSealObject](./common/proto.md#msgsealobject)
+
+## RejectUnSealObject
+
+The corresponding `protobuf` definition is shown below:
+
+- [MsgRejectSealObject](./common/proto.md#msgrejectsealobject-proto)
+
+## DiscontinueBucket
+
+The corresponding `protobuf` definition is shown below:
+
+- [MsgDiscontinueBucket](./common/proto.md#msgdiscontinuebucket)
+
+## GfSp Framework Signer Code
+
+Signer module code implementation: [Signer](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/modular/signer)
diff --git a/docs/modules/taskexecutor.md b/docs/modules/taskexecutor.md
new file mode 100644
index 000000000..58c2a4546
--- /dev/null
+++ b/docs/modules/taskexecutor.md
@@ -0,0 +1,115 @@
+# TaskExecutor
+
+TaskExecutor is the execution unit of SP background tasks. It is a resource-consuming service, for which we introduce a resource manager: it stops pulling (supported in the future) or accepting tasks when the resource limit is reached.
+
+TaskExecutor is an abstract interface to handle background tasks. It asks for tasks from the Manager module, handles them and reports the results or status back to the Manager module. It can handle these tasks: ReplicatePieceTask, SealObjectTask, ReceivePieceTask, GCObjectTask, GCZombiePieceTask and GCMetaTask. You can rewrite these methods to meet your own requirements.
+
+```go
+type TaskExecutor interface {
+    Modular
+    // AskTask asks for a task, given the remaining resource limit, from the manager module.
+    AskTask(ctx context.Context) error
+    // HandleReplicatePieceTask handles the ReplicatePieceTask that is asked from the manager module.
+    HandleReplicatePieceTask(ctx context.Context, task task.ReplicatePieceTask)
+    // HandleSealObjectTask handles the SealObjectTask that is asked from the manager module.
+    HandleSealObjectTask(ctx context.Context, task task.SealObjectTask)
+    // HandleReceivePieceTask handles the ReceivePieceTask that is asked from the manager module.
+    // It confirms whether the piece data that is synced to the secondary SP has been sealed.
+    HandleReceivePieceTask(ctx context.Context, task task.ReceivePieceTask)
+    // HandleGCObjectTask handles the GCObjectTask that is asked from the manager module.
+    HandleGCObjectTask(ctx context.Context, task task.GCObjectTask)
+    // HandleGCZombiePieceTask handles the GCZombiePieceTask that is asked from the manager module.
+    HandleGCZombiePieceTask(ctx context.Context, task task.GCZombiePieceTask)
+    // HandleGCMetaTask handles the GCMetaTask that is asked from the manager module.
+    HandleGCMetaTask(ctx context.Context, task task.GCMetaTask)
+    // ReportTask reports the results or status of a running task to the manager module.
+    ReportTask(ctx context.Context, task task.Task) error
+}
+```
+
+The TaskExecutor interface inherits the [Modular interface](./common/lifecycle_modular.md#modular-interface), so the TaskExecutor module can be managed by the lifecycle and resource manager. As we can see from the second parameter of the methods defined in the `TaskExecutor` interface, there are many tasks, which are also defined as interfaces. The TaskExecutor reports its tasks to the Manager module.
+
+## GC
+
+Garbage Collection (GC) is the background service running in the SP that recycles the storage space of objects deleted on the Greenfield chain. The GC service runs in the background periodically and comprises the steps below (a sketch of one round follows the list):
+
+- When the SP starts up, load the metadata "BlockNumberHandledByGC" stored in the local metadata database, which records the block height handled by the GC service;
+- Check whether the previous background GC tasks have finished; if not, continue them based on the contexts stored in the local metadata database;
+- Get the deleted object list from the metadata service based on the block height, construct GC tasks and dispatch them to the TaskNode service;
+- Each TaskNode runs GC tasks to remove all the pieces of the deleted objects from the piece store, and updates the local SP's metadata as well.
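+
+The snippet below is a hypothetical sketch of one GC round; all helper names (`loadBlockNumberHandledByGC`, `listDeletedObjects`, `dispatchGCTask`, `saveBlockNumberHandledByGC`) are illustrative, not GfSp APIs.
+
+```go
+// runGCRound performs one round of the GC loop described above.
+func runGCRound(ctx context.Context) error {
+    // Resume from the block height persisted in the local metadata database.
+    height, err := loadBlockNumberHandledByGC(ctx)
+    if err != nil {
+        return err
+    }
+    // Ask the metadata service for objects deleted at this height.
+    deletedObjectIDs, err := listDeletedObjects(ctx, height)
+    if err != nil {
+        return err
+    }
+    // Construct and dispatch a GC task per deleted object.
+    for _, id := range deletedObjectIDs {
+        if err := dispatchGCTask(ctx, id); err != nil {
+            return err
+        }
+    }
+    // Persist progress so the next round continues from the following block.
+    return saveBlockNumberHandledByGC(ctx, height+1)
+}
+```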
+
+## AskTask
+
+The second parameter of the AskTask method is the `Limit` interface:
+
+- [Limit](./common/lifecycle_modular.md#limit)
+
+## HandleReplicatePieceTask
+
+The corresponding interface definition is shown below:
+
+- [ReplicatePieceTask](./common/task.md#replicatepiecetask)
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpReplicatePieceTask](./common/proto.md#gfspreplicatepiecetask-proto)
+- [ObjectInfo](./common/proto.md#objectinfo-proto)
+- [Params](./common/proto.md#params-proto)
+
+## HandleSealObjectTask
+
+The corresponding interface definition is shown below:
+
+- [SealObjectTask](./common/task.md#sealobjecttask)
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpSealObjectTask](./common/proto.md#gfspsealobjecttask-proto)
+- [ObjectInfo](./common/proto.md#objectinfo-proto)
+- [Params](./common/proto.md#params-proto)
+
+## HandleReceivePieceTask
+
+The corresponding interface definition is shown below:
+
+- [ReceivePieceTask](./common/task.md#receivepiecetask)
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpReceivePieceTask](./common/proto.md#gfspreceivepiecetask-proto)
+- [ObjectInfo](./common/proto.md#objectinfo-proto)
+- [Params](./common/proto.md#params-proto)
+
+## HandleGCObjectTask
+
+The corresponding interface definition is shown below:
+
+- [GCObjectTask](./common/task.md#gcobjecttask)
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpGCObjectTask](./common/proto.md#gfspgcobjecttask-proto)
+
+## HandleGCZombiePieceTask
+
+The corresponding interface definition is shown below:
+
+- [GCZombiePieceTask](./common/task.md#gczombiepiecetask)
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpGCZombiePieceTask](./common/proto.md#gfspgczombiepiecetask-proto)
+
+## HandleGCMetaTask
+
+The corresponding interface definition is shown below:
+
+- [GCMetaTask](./common/task.md#gcmetatask)
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpGCMetaTask](./common/proto.md#gfspgcmetatask-proto)
+
+## GfSp Framework TaskExecutor Code
+
+TaskExecutor module code implementation: [TaskExecutor](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/modular/taskexecutor)
diff --git a/docs/modules/uploader.md b/docs/modules/uploader.md
new file mode 100644
index 000000000..ae2032e18
--- /dev/null
+++ b/docs/modules/uploader.md
@@ -0,0 +1,42 @@
+# Uploader
+
+Uploader is responsible for receiving the object payload data, cutting it into segments, storing them in the PieceStore, and notifying the downstream service to asynchronously replicate the object payload data, processed by the EC algorithm, to the secondary SPs. For the Uploader workflow, users can refer to [Uploader](../workflow/workflow.md#uploader). We currently abstract the SP as the GfSp framework, which provides users with customizable capabilities to meet their specific requirements. The Uploader module provides an abstract interface, called `Uploader`, as follows:
+
+Uploader is an abstract interface to handle putting object requests from users' accounts and store their payload data in the primary SP's piece store.
+
+```go
+type Uploader interface {
+    Modular
+    // PreUploadObject prepares to handle UploadObject; it can do some checks,
+    // such as checking for duplicates or whether the limitation of the SP has been reached.
+    PreUploadObject(ctx context.Context, task task.UploadObjectTask) error
+    // HandleUploadObjectTask handles the UploadObject and stores payload data into the piece store by data stream.
+    HandleUploadObjectTask(ctx context.Context, task task.UploadObjectTask, stream io.Reader) error
+    // PostUploadObject is called after HandleUploadObjectTask; it can recycle
+    // resources, make statistics and do some other operations.
+    PostUploadObject(ctx context.Context, task task.UploadObjectTask)
+    // QueryTasks queries upload object tasks running on the uploader by task sub-key.
+    QueryTasks(ctx context.Context, subKey task.TKey) ([]task.Task, error)
+}
+```
+
+The Uploader interface inherits the [Modular interface](./common/lifecycle_modular.md#modular-interface), so the Uploader module can be managed by the lifecycle and resource manager. In terms of the functions provided by the Uploader module, there is only one part: uploading an object. It has three methods: PreXXX, HandleXXX and PostXXX, and you can rewrite these methods to meet your own requirements. As we can see from the second parameter of the methods defined in the `Uploader` interface, UploadObject is split into `UploadObjectTask`, which is also defined as an interface. We can query the UploadObject tasks that we care about with the `QueryTasks` method by using a sub-key.
+
+## UploadObjectTask
+
+The corresponding interface definitions are shown below:
+
+- [ObjectTask](./common/task.md#objecttask)
+- [UploadObjectTask](./common/task.md#uploadobjecttask)
+
+ObjectTask inherits the [Task interface](./common/task.md#task). UploadObjectTask also defines ten methods to help query info or set data. You can overwrite all these methods to meet your own requirements.
+
+The corresponding `protobuf` definition is shown below:
+
+- [GfSpUploadObjectTask](./common/proto.md#gfspuploadobjecttask-proto)
+- [ObjectInfo](./common/proto.md#objectinfo-proto)
+- [Params](./common/proto.md#params-proto)
+
+## GfSp Framework Uploader Code
+
+Uploader module code implementation: [Uploader](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/modular/uploader)
diff --git a/docs/readme.md b/docs/readme.md
index 1ce039bb4..708db47dd 100644
--- a/docs/readme.md
+++ b/docs/readme.md
@@ -1,20 +1,28 @@
-## Overview
-This section dives into the internals of the Greenfield Storage Provider implementation.
+# Overview
-### Table of Contents
+This section explores the inner components of the Greenfield Storage Provider implementation.
+ +## Table of Contents - Introduction - - [Overview](introduction/01-overview.md) + - [Overview](./introduction/overview.md) - Modules - - [PieceStore](modules/01-piece_store.md) - - [Redundancy](modules/02-redundancy.md) - - [Storage Provider DB](modules/03-sp_db.md) - - [Block Syncer DB](modules/04-bs_db.md) -- Key Workflow - - [Get Approval](workflow/01-get_approval.md) - - [Upload Payload](workflow/02-put_payload_data.md) - - [Download Payload](workflow/03-get_payload_data.md) - - [Challenge Piece](workflow/04-challenge_piece_data.md) -- Run SP - - [Deployment](run-book/01-deployment.md) - - [Setup Local for Test](run-book/02-localup.md) + - [Gateway](./modules/gateway.md) + - [Authenticator](./modules/authenticator.md) + - [Approver](./modules/approver.md) + - [Uploader](./modules/uploader.md) + - [Downloader](./modules/downloader.md) + - [Manager](./modules/manager.md) + - [TaskExecutor](./modules/taskexecutor.md) + - [Receiver](./modules/receiver.md) + - [Signer](./modules/signer.md) + - [P2P](./modules/p2p.md) +- Store + - [PieceStore](./modules/piece_store.md) + - [Redundancy](./modules/redundancy.md) + - [SP DB](./modules/sp_db.md) + - [BS DB](./modules/bs_db.md) +- Workflow + - [SP Workflow](./workflow/workflow.md) +- Config + - [Config](./spconfig/config_template.toml) diff --git a/docs/run-book/01-deployment.md b/docs/run-book/01-deployment.md deleted file mode 100644 index dbc4a2b01..000000000 --- a/docs/run-book/01-deployment.md +++ /dev/null @@ -1,115 +0,0 @@ -## Dependence - -* SQL: no requirements for the SQL DB version - -> MariaDB - 5.5.68 and Aurora(MySQL 5.7) 2.10.3 has been practiced. - -* Payload Store: [AWS S3](https://aws.amazon.com/s3), [MinIO](https://min.io)(Beta) - -## Compile - -```shell -# clone source -git clone https://github.com/bnb-chain/greenfield-storage-provider.git - -# install compile tools -cd greenfield-storage-provider -make install-tools - -# compile -bash build.sh - -# show the gnfd-sp version information -cd build -./gnfd-sp version - -Greenfield Storage Provider - __ _ __ - _____/ /_____ _________ _____ ____ ____ _________ _ __(_)___/ /__ _____ - / ___/ __/ __ \/ ___/ __ / __ / _ \ / __ \/ ___/ __ \ | / / / __ / _ \/ ___/ - (__ ) /_/ /_/ / / / /_/ / /_/ / __/ / /_/ / / / /_/ / |/ / / /_/ / __/ / - /____/\__/\____/_/ \__,_/\__, /\___/ / .___/_/ \____/|___/_/\__,_/\___/_/ - /____/ /_/ - -Version : vx.x.x -Branch : master -Commit : 6eb30c3bda1a29fc97a4345559944c35cd560517 -Build : go1.20.1 darwin amd64 2023-03-04 23:54 - - -# show the gnfd-sp help -./gnfd-sp -h -``` - -## Join greenfield chain -[join mainnet, join testnet, setup private netWork](https://github.com/bnb-chain/greenfield/tree/master/docs/tutorial) - -## Add SP to greenfield chain -[add sp to greenfield chain](https://github.com/bnb-chain/greenfield/blob/fynn/doc/docs/tutorial/07-storage-provider.md) - -## Make configuration - ```shell - # dump the configuration template to './config.toml' - ./gnfd-sp config.dump - ``` - -[edit configuration template](../../config/config_template.toml) - -## Start with local model -```shell -# show services list -./gnfd-sp list -auth Handle off chain auth requests -blocksyncer Syncs block data to db -challenge Provides the ability to query the integrity hash and piece data -downloader Downloads object from the backend and statistical read traffic -gateway Receives the sdk request -metadata Provides the ability to query meta data -receiver Receives data pieces of an object from other storage provider and store -signer Sign the transaction and 
broadcast to chain -tasknode Executes background task -uploader Uploads object payload to greenfield - - -# start -nohup ./gnfd-sp -config ${config_file} -server ${service_name_list} 2>&1 & - -# gnfd-sp supports obtaining sensitive information from environment variables, these includes: -# AWS -AWS_ACCESS_KEY -AWS_SECRET_KEY -AWS_SESSION_TOKEN -BUCKET_URL - -# SQLDB -SP_DB_USER -SP_DB_PASSWORD -SP_DB_ADDRESS -SP_DB_DATABASE - -# signer service environment variables -SIGNER_OPERATOR_PRIV_KEY -SIGNER_FUNDING_PRIV_KEY -SIGNER_APPROVAL_PRIV_KEY -SIGNER_SEAL_PRIV_KEY -``` - -## Start with remote mode - -```shell - export SP_DB_USER=${SP_DB_USER} - export SP_DB_PASSWORD=${SP_DB_PASSWORD} - export SP_DB_ADDRESS=${SP_DB_ADDRESS} - export SP_DB_DATABASE=${SP_DB_DATABASE} - - # upload configuration - ./gnfd-sp config.upload -c ${config_file_path} - or -./gnfd-sp config.upload -c ${config_file_path} -db.user ${db_user} -db.password ${db_password} -db.address ${db_address} -db.database ${db_database} - - - # start service - nohup ./gnfd-sp config.remote -server ${service_name_list} 2>&1 & - or - nohup ./gnfd-sp config.remote -server ${service_name_list} -db.user ${db_user} -db.password ${db_password} -db.address ${db_address} -db.database ${db_database} 2>&1 & -``` diff --git a/docs/run-book/02-localup.md b/docs/run-book/02-localup.md deleted file mode 100644 index 03adffbc2..000000000 --- a/docs/run-book/02-localup.md +++ /dev/null @@ -1,74 +0,0 @@ -## Setup Local StorageProviders - -## Dependence -* SQLDB: MariaDB - 5.5.68 and Aurora(MySQL 5.7) 2.10.3 has been practiced. - -## Setup local greenfield chain -[setup private network](https://github.com/bnb-chain/greenfield/blob/master/docs/tutorial/03-local-network.md) - -## Add SP to greenfield chain -[add sp to greenfield chain](https://github.com/bnb-chain/greenfield/blob/master/docs/cli/storage-provider.md) - -## Setup local sps -1. Generate localup env - -Including build sp binary, generate directories/configs, create databases. -```bash -# The first time setup GEN_CONFIG_TEMPLATE=1, and the other time is 0. -# When equal to 1, the configuration template will be generated. -GEN_CONFIG_TEMPLATE=1 -bash ./deployment/localup/localup.sh --reset ${GEN_CONFIG_TEMPLATE} -``` - -2. Overwrite db and sp info - -Overwrite all sps' db.info and sp.info according to the real environment. - -``` -deployment/localup/local_env/ -├── sp0 -│   ├── config.toml # templated config -│   ├── db.info # to overwrite real db info -│   ├── gnfd-sp0 # sp binary -│   └── sp.info # to overwrite real sp info -├── sp1 -├── ... -``` - -3. Start sp - -Make config.toml real according to db.info and sp.info, and start sps. - -```bash -# In first time setup GEN_CONFIG_TEMPLATE=1, and the other time is 0. -# When equal to 1, the configuration template will be generated. -GEN_CONFIG_TEMPLATE=0 -bash ./deployment/localup/localup.sh --reset ${GEN_CONFIG_TEMPLATE} -bash ./deployment/localup/localup.sh --start -``` -The environment directory is as follows: -``` -deployment/localup/local_env/ -├── sp0 -│   ├── config.toml # real config -│   ├── data # piecestore data directory -│   ├── db.info -│   ├── gnfd-sp0 -│   ├── gnfd-sp.log # gnfd-sp log file -│   ├── log.txt -│   └── sp.info -├── sp1 -├── ... -``` -4. Other supported commands - -```bash -% bash ./deployment/localup/localup.sh --help -Usage: deployment/localup/localup.sh [option...] 
{help|reset|start|stop|print} - - --help display help info - --reset $GEN_CONFIG_TEMPLATE reset env, $GEN_CONFIG_TEMPLATE=0 or =1 - --start start storage providers - --stop stop storage providers - --print print sp local env work directory -``` diff --git a/docs/spconfig/config_template.toml b/docs/spconfig/config_template.toml new file mode 100644 index 000000000..a8063eed4 --- /dev/null +++ b/docs/spconfig/config_template.toml @@ -0,0 +1,179 @@ +AppID = '' +Server = [] +GRPCAddress = '' + +[SpDB] +User = '' +Passwd = '' +Address = '' +Database = '' +ConnMaxLifetime = 0 +ConnMaxIdleTime = 0 +MaxIdleConns = 0 +MaxOpenConns = 0 + +[BsDB] +User = '' +Passwd = '' +Address = '' +Database = '' +ConnMaxLifetime = 0 +ConnMaxIdleTime = 0 +MaxIdleConns = 0 +MaxOpenConns = 0 + +[BsDBBackup] +User = '' +Passwd = '' +Address = '' +Database = '' +ConnMaxLifetime = 0 +ConnMaxIdleTime = 0 +MaxIdleConns = 0 +MaxOpenConns = 0 + +[PieceStore] +Shards = 0 + +[PieceStore.Store] +Storage = '' +BucketURL = '' +MaxRetries = 0 +MinRetryDelay = 0 +TLSInsecureSkipVerify = false +IAMType = '' + +[Chain] +ChainID = '' +ChainAddress = [] +GasLimit = 0 + +[SpAccount] +SpOperatorAddress = '' +OperatorPrivateKey = '' +FundingPrivateKey = '' +SealPrivateKey = '' +ApprovalPrivateKey = '' +GcPrivateKey = '' + +[Endpoint] +ApproverEndpoint = '' +ManagerEndpoint = '' +DownloaderEndpoint = '' +ReceiverEndpoint = '' +MetadataEndpoint = '' +UploaderEndpoint = '' +P2PEndpoint = '' +SignerEndpoint = '' +AuthenticatorEndpoint = '' + +[Approval] +BucketApprovalTimeoutHeight = 0 +ObjectApprovalTimeoutHeight = 0 +ReplicatePieceTimeoutHeight = 0 + +[Bucket] +AccountBucketNumber = 0 +FreeQuotaPerBucket = 0 +MaxListReadQuotaNumber = 0 +MaxPayloadSize = 0 + +[Gateway] +DomainName = '' +HTTPAddress = '' + +[Executor] +MaxExecuteNumber = 0 +AskTaskInterval = 0 +AskReplicateApprovalTimeout = 0 +AskReplicateApprovalExFactor = 0.0 +ListenSealTimeoutHeight = 0 +ListenSealRetryTimeout = 0 +MaxListenSealRetry = 0 + +[P2P] +P2PPrivateKey = '' +P2PAddress = '' +P2PAntAddress = '' +P2PBootstrap = [] +P2PPingPeriod = 0 + +[Parallel] +GlobalCreateBucketApprovalParallel = 0 +GlobalCreateObjectApprovalParallel = 0 +GlobalMaxUploadingParallel = 0 +GlobalUploadObjectParallel = 0 +GlobalReplicatePieceParallel = 0 +GlobalSealObjectParallel = 0 +GlobalReceiveObjectParallel = 0 +GlobalGCObjectParallel = 0 +GlobalGCZombieParallel = 0 +GlobalGCMetaParallel = 0 +GlobalDownloadObjectTaskCacheSize = 0 +GlobalChallengePieceTaskCacheSize = 0 +GlobalBatchGcObjectTimeInterval = 0 +GlobalGcObjectBlockInterval = 0 +GlobalGcObjectSafeBlockDistance = 0 +GlobalSyncConsensusInfoInterval = 0 +UploadObjectParallelPerNode = 0 +ReceivePieceParallelPerNode = 0 +DownloadObjectParallelPerNode = 0 +ChallengePieceParallelPerNode = 0 +AskReplicateApprovalParallelPerNode = 0 +QuerySPParallelPerNode = 0 +DiscontinueBucketEnabled = false +DiscontinueBucketTimeInterval = 0 +DiscontinueBucketKeepAliveDays = 0 + +[Task] +UploadTaskSpeed = 0 +DownloadTaskSpeed = 0 +ReplicateTaskSpeed = 0 +ReceiveTaskSpeed = 0 +SealObjectTaskTimeout = 0 +GcObjectTaskTimeout = 0 +GcZombieTaskTimeout = 0 +GcMetaTaskTimeout = 0 +SealObjectTaskRetry = 0 +ReplicateTaskRetry = 0 +ReceiveConfirmTaskRetry = 0 +GcObjectTaskRetry = 0 +GcZombieTaskRetry = 0 +GcMetaTaskRetry = 0 + +[Monitor] +DisableMetrics = false +DisablePProf = false +MetricsHTTPAddress = '' +PProfHTTPAddress = '' + +[Rcmgr] +DisableRcmgr = false + +[Log] +Level = '' +Path = '' + +[Metadata] +IsMasterDB = false +BsDBSwitchCheckIntervalSec = 0 + 
+[BlockSyncer] +Modules = [] +Dsn = '' +DsnSwitched = '' +Workers = 0 +EnableDualDB = false + +[APIRateLimiter] +PathPattern = [] +HostPattern = [] +APILimits = [] + +[APIRateLimiter.IPLimitCfg] +On = false +RateLimit = 0 +RatePeriod = '' + +[Manager] +EnableLoadTask = false diff --git a/docs/store/bs_db.md b/docs/store/bs_db.md new file mode 100644 index 000000000..e69de29bb diff --git a/docs/store/piece_store.md b/docs/store/piece_store.md new file mode 100644 index 000000000..0592bf343 --- /dev/null +++ b/docs/store/piece_store.md @@ -0,0 +1,116 @@ +# Piece Store + +## Vision + +Write once, run on every storage service. + +## Goal + +- Vendor-agnostic +- Production ready +- High performance +- High availability + +## Overview + +The core function of PieceStore module is to be compatible with multiple object storage or KV storage such as S3, GCS, Azure Blob, Minio, Ceph, IPFS, DiskFile, Memory, etc. Different SP can use different object storage without caring about the underlying implementation. + +
+*Figure: PieceStore Architecture*
+
+[PieceStore code](https://github.com/bnb-chain/greenfield-storage-provider/tree/master/store/piecestore)
+
+### API Interfaces
+
+PieceStore provides encapsulated interfaces for upper-layer services to use. Currently these APIs are called as local package functions. We provide two interfaces to access PieceStore, as follows:
+
+```go
+// PieceOp is the helper interface for piece key operations and piece size calculation.
+type PieceOp interface {
+    // SegmentPieceKey returns the segment piece key used as the key in the piece store.
+    SegmentPieceKey(objectID uint64, segmentIdx uint32) string
+    // ECPieceKey returns the ec piece key used as the key in the piece store.
+    ECPieceKey(objectID uint64, segmentIdx uint32, replicateIdx uint32) string
+    // ChallengePieceKey returns the piece key used as the key of the challenge piece.
+    // If replicateIdx < 0, it returns the SegmentPieceKey, otherwise it returns the ECPieceKey.
+    ChallengePieceKey(objectID uint64, segmentIdx uint32, replicateIdx int32) string
+    // MaxSegmentPieceSize returns the object's max segment piece size, computed from the object payload size
+    // and the max segment size that comes from the storage params.
+    MaxSegmentPieceSize(payloadSize uint64, maxSegmentSize uint64) int64
+    // SegmentPieceCount returns the segment piece count of the object payload, computed from the object payload
+    // size and the max segment size that comes from the storage params.
+    SegmentPieceCount(payloadSize uint64, maxSegmentSize uint64) uint32
+    // SegmentPieceSize returns the segment piece size at a segment index, computed from the object payload size
+    // and the max segment size that comes from the storage params.
+    SegmentPieceSize(payloadSize uint64, segmentIdx uint32, maxSegmentSize uint64) int64
+    // ECPieceSize returns the ec piece size at an ec index, computed from the object payload size, the max
+    // segment size and the chunk number; the last two params come from the storage params.
+    ECPieceSize(payloadSize uint64, segmentIdx uint32, maxSegmentSize uint64, chunkNum uint32) int64
+}
+
+// PieceStore is the interface to the piece store that stores the object payload data.
+type PieceStore interface {
+    // GetPiece returns the piece data from the piece store by piece key.
+    // The piece key can be a segment or an ec piece key.
+    GetPiece(ctx context.Context, key string, offset, limit int64) ([]byte, error)
+    // PutPiece puts the piece data into the piece store; it can put segment
+    // or ec piece data.
+    PutPiece(ctx context.Context, key string, value []byte) error
+    // DeletePiece deletes the piece data from the piece store; it can delete
+    // segment or ec piece data.
+    DeletePiece(ctx context.Context, key string) error
+}
+```
+
+The PieceOp interface describes how to compose segment or EC piece keys and how to compute piece size or count info. The PieceStore interface describes the operations the lower layer provides. You can overwrite all the methods of these two interfaces to implement your own functions. In the future, upper-layer services could access PieceStore through HTTP, RPC or P2P, which is more decentralized.
+
+### Sharding
+
+PieceStore provides a sharding function for high data availability. PieceStore uses the `fnv` algorithm to shard piece data. If you want to use data sharding, you can configure `Shards = a` (where a is a number with 2 <= a <= 256) in config.toml. A minimal sketch of fnv-based shard selection follows the note below.
+
+**Note** The current implementation of sharding can only be used for multiple buckets in one region. Multi-region support will be added in the future for even higher availability.
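+
+The snippet below illustrates the idea of fnv-based sharding: hash the piece key and map it onto the configured shard count. The exact key format and hashing scheme in PieceStore may differ; this is only an illustration.
+
+```go
+package main
+
+import (
+    "fmt"
+    "hash/fnv"
+)
+
+// shardIndex hashes a piece key with fnv and maps it onto the configured
+// shard count (the Shards value in config.toml).
+func shardIndex(pieceKey string, shards uint32) uint32 {
+    h := fnv.New32a()
+    h.Write([]byte(pieceKey))
+    return h.Sum32() % shards
+}
+
+func main() {
+    // "1048576_s0" is an illustrative piece key, not the exact GfSp key format.
+    fmt.Println(shardIndex("1048576_s0", 8)) // prints a shard index in [0, 8)
+}
+```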
+
+### Compatible With Multiple Object Storage Systems
+
+PieceStore is vendor-agnostic, so it is compatible with multiple object storage systems. SP currently supports storage backends such as `S3, MinIO, LDFS, OSS, DiskFile and Memory`.
+We recommend using S3 or MinIO in production environments; [the related config document is here](https://github.com/bnb-chain/greenfield-storage-provider/blob/master/store/piecestore/README.md). Users can try PieceStore locally with DiskFile or Memory. The common interface is as follows:
+
+```go
+// ObjectStorage is a common interface that must be implemented if some users want to use an object
+// storage (such as S3, Azure Blob, Minio, OSS, COS, etc.)
+type ObjectStorage interface {
+    // String is the description of an object storage
+    String() string
+    // CreateBucket creates the bucket if it does not exist
+    CreateBucket(ctx context.Context) error
+    // GetObject gets data for the given object specified by key
+    GetObject(ctx context.Context, key string, offset, limit int64) (io.ReadCloser, error)
+    // PutObject puts data read from a reader to an object specified by key
+    PutObject(ctx context.Context, key string, reader io.Reader) error
+    // DeleteObject deletes an object
+    DeleteObject(ctx context.Context, key string) error
+
+    // HeadBucket determines if a bucket exists and whether we have permission to access it
+    HeadBucket(ctx context.Context) error
+    // HeadObject returns some information about the object, or an error if not found
+    HeadObject(ctx context.Context, key string) (Object, error)
+    // ListObjects returns a list of objects
+    ListObjects(ctx context.Context, prefix, marker, delimiter string, limit int64) ([]Object, error)
+    // ListAllObjects returns all the objects as a channel
+    ListAllObjects(ctx context.Context, prefix, marker string) (<-chan Object, error)
+}
+```
+
+If you want to use a new storage system, you can implement the methods of the ObjectStorage interface. It's very convenient!
+
+### Outlook
+
+PieceStore provides some fundamental functions: wrapped API interfaces, sharding and compatibility with multiple storage systems. However, more functions will be added in the future.
+
+1. Data Cache
+
+PieceStore is combined with object storage, and a cache is an important component for interacting efficiently between the local client and remote services. Read and write data can be loaded into the cache in advance or asynchronously. Using caching can significantly reduce the latency of storage operations and increase data throughput compared to interacting with remote services directly.
+
+2. Data Sync
+
+PieceStore will provide data synchronization for conveniently transferring data between different storage systems or different regions. Different SPs can therefore do geo-disaster recovery backups to ensure high availability of data.
diff --git a/docs/modules/02-redundancy.md b/docs/store/redundancy.md
similarity index 100%
rename from docs/modules/02-redundancy.md
rename to docs/store/redundancy.md
diff --git a/docs/store/sp_db.md b/docs/store/sp_db.md
new file mode 100644
index 000000000..e3faa2c13
--- /dev/null
+++ b/docs/store/sp_db.md
@@ -0,0 +1,285 @@
+# SPDB
+
+An SP (Storage Provider) database store needs to implement the SPDB (Storage Provider Database) interface. A SQL database is used by default.
+The following mainly introduces the data schemas corresponding to several core interfaces.
+
+```go
+// SPDB contains all the methods required by the sql database
+type SPDB interface {
+    UploadObjectProgressDB
+    GCObjectProgressDB
+    SignatureDB
+    TrafficDB
+    SPInfoDB
+    OffChainAuthKeyDB
+}
+```
+
+## UploadObjectProgressDB
+
+UploadObjectProgressDB is an interface that records upload-object-related progress (including foreground and background) and state. You can overwrite all these methods to meet your requirements.
+
+```go
+type UploadObjectProgressDB interface {
+    // InsertUploadProgress inserts a new upload object progress.
+    InsertUploadProgress(objectID uint64) error
+    // DeleteUploadProgress deletes the upload object progress.
+    DeleteUploadProgress(objectID uint64) error
+    // UpdateUploadProgress updates the upload object progress state.
+    UpdateUploadProgress(uploadMeta *UploadObjectMeta) error
+    // GetUploadState queries the task state by object id.
+    GetUploadState(objectID uint64) (storetypes.TaskState, error)
+    // GetUploadMetasToReplicate queries the latest upload_done/replicate_doing objects to continue replicating.
+    // It is only used at startup.
+    GetUploadMetasToReplicate(limit int) ([]*UploadObjectMeta, error)
+    // GetUploadMetasToSeal queries the latest replicate_done/seal_doing objects to continue sealing.
+    // It is only used at startup.
+    GetUploadMetasToSeal(limit int) ([]*UploadObjectMeta, error)
+}
+
+// UploadObjectMeta defines the upload object state and related seal info, etc.
+type UploadObjectMeta struct {
+    ObjectID uint64
+    TaskState storetypes.TaskState
+    SecondaryAddresses []string
+    SecondarySignatures [][]byte
+    ErrorDescription string
+}
+```
+
+TaskState is defined as a protobuf enum:
+
+```proto
+enum TaskState {
+  TASK_STATE_INIT_UNSPECIFIED = 0;
+
+  TASK_STATE_UPLOAD_OBJECT_DOING = 1;
+  TASK_STATE_UPLOAD_OBJECT_DONE = 2;
+  TASK_STATE_UPLOAD_OBJECT_ERROR = 3;
+
+  TASK_STATE_ALLOC_SECONDARY_DOING = 4;
+  TASK_STATE_ALLOC_SECONDARY_DONE = 5;
+  TASK_STATE_ALLOC_SECONDARY_ERROR = 6;
+
+  TASK_STATE_REPLICATE_OBJECT_DOING = 7;
+  TASK_STATE_REPLICATE_OBJECT_DONE = 8;
+  TASK_STATE_REPLICATE_OBJECT_ERROR = 9;
+
+  TASK_STATE_SIGN_OBJECT_DOING = 10;
+  TASK_STATE_SIGN_OBJECT_DONE = 11;
+  TASK_STATE_SIGN_OBJECT_ERROR = 12;
+
+  TASK_STATE_SEAL_OBJECT_DOING = 13;
+  TASK_STATE_SEAL_OBJECT_DONE = 14;
+  TASK_STATE_SEAL_OBJECT_ERROR = 15;
+}
+```
+
+## GCObjectProgressDB
+
+GCObjectProgressDB is an interface that records gc-object-related progress. You can overwrite all these methods to meet your requirements.
+
+```go
+type GCObjectProgressDB interface {
+    // InsertGCObjectProgress inserts a new gc object progress.
+    InsertGCObjectProgress(taskKey string, gcMeta *GCObjectMeta) error
+    // DeleteGCObjectProgress deletes the gc object progress.
+    DeleteGCObjectProgress(taskKey string) error
+    // UpdateGCObjectProgress updates the gc object progress.
+    UpdateGCObjectProgress(gcMeta *GCObjectMeta) error
+    // GetGCMetasToGC queries the latest gc metas to continue gc.
+    // It is only used at startup.
+    GetGCMetasToGC(limit int) ([]*GCObjectMeta, error)
+}
+
+// GCObjectMeta defines the gc object range progress info.
+type GCObjectMeta struct {
+    TaskKey string
+    StartBlockHeight uint64
+    EndBlockHeight uint64
+    CurrentBlockHeight uint64
+    LastDeletedObjectID uint64
+}
+```
+
+## SignatureDB
+
+SignatureDB is an abstract object integrity interface. You can overwrite all these methods to meet your requirements.
+
+```go
+type SignatureDB interface {
+    /*
+        Object Signature is used to get challenge info.
+    */
+    // GetObjectIntegrity gets integrity meta info by object id.
+    GetObjectIntegrity(objectID uint64) (*IntegrityMeta, error)
+    // SetObjectIntegrity sets (maybe overwrites) integrity hash info in the db.
+    SetObjectIntegrity(integrity *IntegrityMeta) error
+    // DeleteObjectIntegrity deletes the integrity hash.
+    DeleteObjectIntegrity(objectID uint64) error
+    /*
+        Piece Signature is used to help replicate the object's piece data to secondary SPs; it is temporary.
+    */
+    // SetReplicatePieceChecksum sets (maybe overwrites) the piece hash.
+    SetReplicatePieceChecksum(objectID uint64, replicateIdx uint32, pieceIdx uint32, checksum []byte) error
+    // GetAllReplicatePieceChecksum gets all piece hashes.
+    GetAllReplicatePieceChecksum(objectID uint64, replicateIdx uint32, pieceCount uint32) ([][]byte, error)
+    // DeleteAllReplicatePieceChecksum deletes all piece hashes.
+    DeleteAllReplicatePieceChecksum(objectID uint64, replicateIdx uint32, pieceCount uint32) error
+}
+
+// IntegrityMeta defines the payload integrity hash and piece checksums for an objectID.
+type IntegrityMeta struct {
+    ObjectID uint64
+    IntegrityChecksum []byte
+    PieceChecksumList [][]byte
+    Signature []byte
+}
+```
+
+## TrafficDB
+
+TrafficDB defines a series of traffic interfaces. You can overwrite all these methods to meet your requirements.
+
+```go
+type TrafficDB interface {
+    // CheckQuotaAndAddReadRecord first creates the bucket traffic if it does not exist,
+    // then checks whether the added traffic record exceeds the quota. If it exceeds the
+    // quota, it returns an error; otherwise, it adds a record and returns nil.
+    CheckQuotaAndAddReadRecord(record *ReadRecord, quota *BucketQuota) error
+    // GetBucketTraffic returns the bucket traffic info;
+    // note that it may return (nil, nil) when there is no bucket traffic.
+    GetBucketTraffic(bucketID uint64, yearMonth string) (*BucketTraffic, error)
+    // GetReadRecord returns the record list in a time range.
+    GetReadRecord(timeRange *TrafficTimeRange) ([]*ReadRecord, error)
+    // GetBucketReadRecord returns the bucket record list in a time range.
+    GetBucketReadRecord(bucketID uint64, timeRange *TrafficTimeRange) ([]*ReadRecord, error)
+    // GetObjectReadRecord returns the object record list in a time range.
+    GetObjectReadRecord(objectID uint64, timeRange *TrafficTimeRange) ([]*ReadRecord, error)
+    // GetUserReadRecord returns the user record list in a time range.
+    GetUserReadRecord(userAddress string, timeRange *TrafficTimeRange) ([]*ReadRecord, error)
+}
+
+// ReadRecord defines a read request record; it will decrease the bucket read quota.
+type ReadRecord struct {
+    BucketID uint64
+    ObjectID uint64
+    UserAddress string
+    BucketName string
+    ObjectName string
+    ReadSize uint64
+    ReadTimestampUs int64
+}
+
+// BucketQuota defines the read quota of a bucket.
+type BucketQuota struct {
+    ReadQuotaSize uint64
+}
+
+// BucketTraffic records traffic by year and month.
+type BucketTraffic struct {
+    BucketID uint64
+    YearMonth string // YearMonth is the traffic's YearMonth, format "2023-02".
+    BucketName string
+    ReadConsumedSize uint64
+    ReadQuotaSize uint64
+    ModifyTime int64
+}
+
+// TrafficTimeRange is used by queries; records are returned in [StartTimestampUs, EndTimestampUs).
+type TrafficTimeRange struct {
+    StartTimestampUs int64
+    EndTimestampUs int64
+    LimitNum int // unlimited if LimitNum <= 0.
+}
+```
+
+## SPInfoDB
+
+SPInfoDB defines a series of SP info interfaces. You can overwrite all these methods to meet your requirements.
+
+```go
+type SPInfoDB interface {
+    // UpdateAllSp updates all SP info and deletes old SP info.
+    UpdateAllSp(spList []*sptypes.StorageProvider) error
+    // FetchAllSp returns all SP info if status is nil; otherwise it returns SP info by status.
+    FetchAllSp(status ...sptypes.Status) ([]*sptypes.StorageProvider, error)
+    // FetchAllSpWithoutOwnSp returns all SP info except the own SP if status is nil;
+    // otherwise it returns SP info by status except the own SP.
+    FetchAllSpWithoutOwnSp(status ...sptypes.Status) ([]*sptypes.StorageProvider, error)
+    // GetSpByAddress returns SP info by address and addressType.
+    GetSpByAddress(address string, addressType SpAddressType) (*sptypes.StorageProvider, error)
+    // GetSpByEndpoint returns SP info by endpoint.
+    GetSpByEndpoint(endpoint string) (*sptypes.StorageProvider, error)
+    // GetOwnSpInfo returns the own SP info.
+    GetOwnSpInfo() (*sptypes.StorageProvider, error)
+    // SetOwnSpInfo sets (maybe overwrites) the own SP info.
+    SetOwnSpInfo(sp *sptypes.StorageProvider) error
+}
+
+// SpAddressType identifies the address type of an SP.
+type SpAddressType int32
+
+const (
+    OperatorAddressType SpAddressType = iota + 1
+    FundingAddressType
+    SealAddressType
+    ApprovalAddressType
+)
+```
+
+The protobuf definition is as follows:
+
+```proto
+// StorageProvider defines the meta info of a storage provider
+message StorageProvider {
+  // operator_address defines the account address of the storage provider's operator; it is also the unique index key of the sp.
+  string operator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+  // funding_address defines one of the storage provider's accounts which is used to deposit and reward.
+  string funding_address = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+  // seal_address defines one of the storage provider's accounts which is used to SealObject
+  string seal_address = 3 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+  // approval_address defines one of the storage provider's accounts which is used to approve a user's createBucket/createObject request
+  string approval_address = 4 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+  // gc_address defines one of the storage provider's accounts which is used for gc purposes.
+  string gc_address = 5 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+  // total_deposit defines the number of tokens deposited by this storage provider for staking.
+  string total_deposit = 6 [
+    (cosmos_proto.scalar) = "cosmos.Int",
+    (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int",
+    (gogoproto.nullable) = false
+  ];
+  // status defines the current service status of this storage provider
+  Status status = 7;
+  // endpoint defines the storage provider's network service address
+  string endpoint = 8;
+  // description defines the description terms for the storage provider.
+  Description description = 9 [(gogoproto.nullable) = false];
+}
+```
+
+## OffChainAuthKeyDB
+
+OffChainAuthKeyDB defines authentication operations in SpDB. You can overwrite all these methods to meet your requirements.
+ +```go +type OffChainAuthKeyDB interface { + GetAuthKey(userAddress string, domain string) (*OffChainAuthKey, error) + UpdateAuthKey(userAddress string, domain string, oldNonce int32, newNonce int32, newPublicKey string, newExpiryDate time.Time) error + InsertAuthKey(newRecord *OffChainAuthKey) error +} + +// OffChainAuthKey contains some info about authentication +type OffChainAuthKey struct { + UserAddress string + Domain string + + CurrentNonce int32 + CurrentPublicKey string + NextNonce int32 + ExpiryDate time.Time + + CreatedTime time.Time + ModifiedTime time.Time +} +``` diff --git a/docs/workflow/01-get_approval.md b/docs/workflow/01-get_approval.md deleted file mode 100644 index aace05839..000000000 --- a/docs/workflow/01-get_approval.md +++ /dev/null @@ -1,88 +0,0 @@ -# Get Approval - -When creating a bucket on the primary SP or storing data to one SP, such as CreateBucket, PutObject, ReplicateObjectData operations, -the request originator needs to send a GetApproval request to ask whether the SP is willing to serve the request. The SP can decide -whether it is willing to accept approval based on some dimensions, such ad bucket, object and user, eg: SP can reject users with -bad reputation, and can reject specific objects or buckets. The SP acknowledges the request by signing a message for the operation -and response to the originator, if the SP does not want to serve(the default policy is to serve, each SP can customize its own strategy), -it can refuse to the request. - -## Gateway -* Receives the GetApproval request from the request originator. -* Verifies the signature of request to ensure that the request has not been tampered with. -* Checks the authorization to ensure the corresponding account is existed. -* Fills the CreateBucket/PutObject/ReplicateObjectData message's timeout field and dispatches the request to Signer service. -* Gets Signature from Signer and fills the message's approval signature field, and returns to the request originator. - -### GetApproval message to primary SP -```protobuf -message Approval { - uint64 expired_height = 1; - bytes sig = 2; -} -message MsgCreateBucket { - option (cosmos.msg.v1.signer) = "creator"; - // creator is the account address of bucket creator, it is also the bucket owner. - string creator = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; - // bucket_name is a globally unique name of bucket - string bucket_name = 2; - // is_public means the bucket is private or public. if private, only bucket owner or grantee can read it, - // otherwise every greenfield user can read it. - bool is_public = 3; - // payment_address is an account address specified by bucket owner to pay the read fee. Default: creator - string payment_address = 4 [(cosmos_proto.scalar) = "cosmos.AddressString"]; - // primary_sp_address is the address of primary sp. - string primary_sp_address = 6 [(cosmos_proto.scalar) = "cosmos.AddressString"]; - // primary_sp_approval is the approval info of the primary SP which indicates that primary sp confirm the user's request. - Approval primary_sp_approval = 7; - // read_quota - uint64 read_quota = 8; -} -message MsgCreateObject { - option (cosmos.msg.v1.signer) = "creator"; - // creator is the account address of object uploader - string creator = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; - // bucket_name is the name of the bucket where the object is stored. 
- string bucket_name = 2; - // object_name is the name of object - string object_name = 3; - // payload_size is size of the object's payload - uint64 payload_size = 4; - // is_public means the bucket is private or public. if private, only bucket owner or grantee can access it, - // otherwise every greenfield user can access it. - bool is_public = 5; - // content_type is a standard MIME type describing the format of the object. - string content_type = 6; - // primary_sp_approval is the approval info of the primary SP which indicates that primary sp confirm the user's request. - Approval primary_sp_approval = 7; - // expect_checksums is a list of hashes which was generate by redundancy algorithm. - repeated bytes expect_checksums = 8; - // redundancy_type can be ec or replica - RedundancyType redundancy_type = 9; - // expect_secondarySPs is a list of StorageProvider address, which is optional - repeated string expect_secondary_sp_addresses = 10 [(cosmos_proto.scalar) = "cosmos.AddressString"]; -} - -``` - - -### GetApproval message to secondary SP -```protobuf - -message ReplicateApproval { - uint64 expired_time = 1; - bytes sig = 2; -} -message MsgReplicateObjectData { - // object_info defines the object info for getting approval - bnbchain.greenfield.storage.ObjectInfo object_info = 1; - // replicate_approval is the approval info of replicating object - ReplicateApproval replicate_approval = 2; -} - -``` -* The SP receives the CreateBucket/CreateObject/ReplicateObjectData GetApproval request. - * If the SP is willing to serve the request, it fills the expired-height and expired-time fields of the message and sign it, then responses to the originator; otherwise it refuses with a message response. - -## Signer -* Receives the CreateBucket/PutObject/ReplicateObjectData message, sign it with the SP's private key and response to the Gateway service. diff --git a/docs/workflow/02-put_payload_data.md b/docs/workflow/02-put_payload_data.md deleted file mode 100644 index 94d25a193..000000000 --- a/docs/workflow/02-put_payload_data.md +++ /dev/null @@ -1,36 +0,0 @@ -# Put Object - -## Gateway -* Receives the PutObject request from the client. -* Verifies the signature of request to ensure that the request has not been tampered with. -* Checks the authorization to ensure the corresponding account has permissions on resources. -* Dispatches the request to Uploader. - -## Uploader -* Accepts object data in streaming and chops it into segments according to MaxSegmentSize. The MaxSegmentSize is the consensus result reached in the greenfield chain. And uploads the segments to PieceStore. -* Creates JobContext with the `INIT_UNSPECIFIED` initial state. Turns to `UPLOAD_OBJECT_DOING` state at the beginning of uploading segments. After uploading all segments, the JobContext's state enters `UPLOAD_OBJECT_DONE`. If any abnormal situation in the uploading, the JobContext's state will change to `UPLOAD_OBJECT_ERROR`. -* After uploading all the segments, insert all the segment data checksums and the root checksum into SP DB. -* Notifying the TaskNode, the Uploader will return to the client that the put object request is successful. - -## TaskNode -* Asynchronously executes replicating object data to secondary SPs, and the uploader can always quickly receive the successful result from the TaskNode. The JobContext's state turn to `ALLOC_SECONDARY_DOING` from `UPLOAD_OBJECT_DONE`. 
-* Sends the GetSecondarySPApproval request to P2P node, it will broadcast to other SPs , and collect results back to TaskNode for selecting the secondary SPs. The JobContext's state enters `ALLOC_SECONDARY_DONE`, and turns into `REPLICATE_OBJECT_DOING` state immediately from `ALLOC_SECONDARY_DONE` state. -* Gets segments from PieceStore in parallel and computes a data redundancy solution for these segments based on Erasure Coding (EC), generating the EC pieces. Reorganize the EC pieces into six replicate data groups, each replicate data group contains several EC pieces according to the Redundancy policy. -* Then sends the replicate data groups in streaming to the selected secondary SPs in parallel. -* The secondary SP information of JobContext will be updated once if the replicating of a secondary SP is completed, until all secondary SPs are completed, the state of the JobContext will be updated to `REPLICATE_OBJECT_DONE` from `REPLICATE_OBJECT_DOING`. - -## Receiver -* Checks the SecondarySP approval whether is self-signed and has timed out. If so, will return `SIGNATURE_ERROR` to the TaskNode. -* The Receiver works in the secondary SP, receives EC pieces that belong to the same replicate data group, and uploads the EC pieces to the secondary SP's PieceStore. -* Computes the EC pieces integrity checksum, sign the integrity checksum by SP's approval private key, then returns these to the TaskNode. - -## TaskNode -* Receives the response from secondary SPs' Receiver, and un-sign the signature to compare with the secondary SP's approval public key. -* Sends the MsgSealObject to the Signer for signing the seal object transaction and broadcasting to the greenfield chain with the secondary SPs' integrity hash and signature. The state of the JobContext turns to `SIGN_OBJECT_DOING` from `REPLICATE_OBJECT_DONE`, if the Signer success to broadcast the SealObjectTX, then enters the `SIGN_OBJECT_DONE` state, and enters `SEAL_OBJECT_TX_DOING` state immediately from `SIGN_OBJECT_DONE` state. -* Monitor the execution results of seal object transaction on the greenfield chain to determine whether the seal is successful. If so, the JobContext state enters the `SEAL_OBJECT_DONE` state. - - -#### Background -* [PieceStore](../modules/01-piece_store.md) -* [Redundancy](../modules/02-redundancy.md) -* [JobContext](../modules/03-sp_db.md) diff --git a/docs/workflow/03-get_payload_data.md b/docs/workflow/03-get_payload_data.md deleted file mode 100644 index ac2dd255e..000000000 --- a/docs/workflow/03-get_payload_data.md +++ /dev/null @@ -1,15 +0,0 @@ -# Get Object - -## Gateway -* Receives the GetObject request from the client. -* Verifies the signature of request to ensure that the request has not been tampered with. -* Checks the authorization to ensure the corresponding account has permissions on resources. -* Checks the object state and payment account state to ensure the object is sealed and the payment account is active. -* Dispatches the request to Downloader. - -## Downloader -* Receives the GetObject request from the Gateway service. -* Check whether the read traffic exceeds the quota. - * If exceeds the quota, the Downloader refuses to serve and returns a not-enough-quota error to the Gateway. - * If the quota is sufficient, the Downloader inserts read record into the SP traffic-db. -* Splits the GetObject request info the GetPiece requests(support range read) to get piece payload data, and returns the object payload data streaming to the Gateway. 
\ No newline at end of file
diff --git a/docs/workflow/04-challenge_piece_data.md b/docs/workflow/04-challenge_piece_data.md
deleted file mode 100644
index c66aa8d2b..000000000
--- a/docs/workflow/04-challenge_piece_data.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Challenge Object Data
-It is always the first priority of any decentralized storage network to guarantee data integrity and availability.
-We use data challenge instead of storage proof to get better HA. There will be some data challenges to random
-pieces on greenfield chain continuously. And the SP, which stores the challenged piece, uses the challenge workflow
-to response. Each SP splits the object payload data to segments, and store segment data to piece store and store
-segment checksum to SP DB.
-
-## Gateway
-* Receives the Challenge request from the client.
-* Verifies the signature of request to ensure that the request has not been tampered with.
-* Checks the authorization to ensure the corresponding account has permissions on resources.
-* Dispatches the request to Challenge.
-
-## Challenge
-* Receives the Challenge request from the Gateway.
-* Returns all segment data checksums and challenge segment data payload to the Gateway service.
-  * Retrieve all segment data checksums from the SP DB.
-  * Get the challenge segment data from the piece store.
diff --git a/docs/workflow/workflow.md b/docs/workflow/workflow.md
new file mode 100644
index 000000000..cc3054b62
--- /dev/null
+++ b/docs/workflow/workflow.md
@@ -0,0 +1,138 @@
+# SP Workflow
+
+This section brings together all the existing workflows of SP to help you understand how SP works and how its internal state flows.
+
+The workflow of SP is divided into the following six parts: GetApproval, UploadObject (upload to PrimarySP, replicate to SecondarySP), DownloadObject, ChallengePiece, GCObject and QueryMeta. GetApproval, UploadObject and DownloadObject belong to the frontend modules, so you should first send GetApproval requests before uploading objects; you can then upload objects into SP, and finally download objects from SP or query meta info about objects. ChallengePiece and GCObject belong to the background modules, and you are not directly aware of these two modules.
+
+## Get Approval
+
+Get Approval API provides two actions: CreateBucket and CreateObject. To upload an object into SP, you must first send a CreateBucket approval request, which will create a bucket on the Greenfield blockchain. If the request is successful, you can then send a CreateObject approval request. Both of these actions are used to determine whether SP is willing to serve the request. SP may reject users with a bad reputation or specific objects or buckets. SP approves the request by signing a message for the action and responding to the users. By default, SP will serve the request, but it can refuse if it chooses to do so. Each SP can customize its own strategy for accepting or rejecting requests.
+
+The flow chart is shown below:
+
+- Gateway receives GetApproval requests from the request originator.
+- Gateway verifies the signature of the request to ensure that the request has not been tampered with.
+- Gateway invokes Authorizer to check the authorization and ensure the corresponding account exists.
+- Gateway fills the CreateBucket/CreateObject message's timeout field and dispatches the request to the Signer service.
+- Gateway gets the signature from Signer, fills the message's approval signature field, and returns to the request originator (a sketch of this signing step follows).
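+
+For intuition, the signing step can be pictured with a minimal sketch. This is not the Signer's real implementation: the `Approval` shape and `signApproval` helper are invented for illustration, and ed25519 stands in for the SP's actual approval key scheme.
+
+```go
+package main
+
+import (
+	"crypto/ed25519"
+	"crypto/rand"
+	"fmt"
+)
+
+// Approval mirrors the on-chain approval shape: an expiry height plus a
+// signature over the message's sign bytes.
+type Approval struct {
+	ExpiredHeight uint64
+	Sig           []byte
+}
+
+// signApproval fills the expiry and signs the message bytes; expiryBlocks is
+// an assumed policy knob, not a real config name.
+func signApproval(priv ed25519.PrivateKey, msgBytes []byte, currentHeight, expiryBlocks uint64) Approval {
+	return Approval{
+		ExpiredHeight: currentHeight + expiryBlocks,
+		Sig:           ed25519.Sign(priv, msgBytes),
+	}
+}
+
+func main() {
+	pub, priv, _ := ed25519.GenerateKey(rand.Reader)
+	msg := []byte("MsgCreateBucket sign bytes")
+	approval := signApproval(priv, msg, 1000, 100)
+	fmt.Println("expired height:", approval.ExpiredHeight)
+	fmt.Println("signature valid:", ed25519.Verify(pub, msg, approval.Sig))
+}
+```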
+
+**Note**
+
+By default, each account can create a maximum of 100 buckets.
+
+If users send multiple CreateBucket or CreateObject approval requests in a short period of time, SP will return the same result, because the approval carries an expiry blockchain height that is set to prevent repeated requests, such as DDoS attacks.
+
+See request and response details for this API: [GetApproval](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/get_approval.html).
+
+## Upload Object
+
+After successfully sending requests to the GetApproval API and receiving results, you can upload an object to SP. This API involves two steps: first, users manually upload an object to the PrimarySP; second, after a successful upload to the PrimarySP, the object is automatically replicated to the secondary SPs to ensure data reliability.
+
+The upload to PrimarySP flow chart is shown below:
+
+### Gateway
+
+- Gateway receives PutObject requests from the client.
+- Gateway verifies the signature of the request to ensure that the request has not been tampered with.
+- Gateway invokes Authorizer to check the authorization and ensure the corresponding account has permissions on resources.
+- Gateway dispatches the request to the Uploader module.
+
+### Uploader
+
+- Uploader accepts object data in a streaming format and divides it into segments based on the `MaxSegmentSize`, which is determined by consensus in the Greenfield chain. The segmented data is then stored in the PieceStore.
+- Uploader creates a JobContext with an initial state of `INIT_UNSPECIFIED`. Upon beginning the upload of segments, the JobContext's state transitions to `UPLOAD_OBJECT_DOING`. Once all segments have been uploaded, the JobContext's state changes to `UPLOAD_OBJECT_DONE`. In the event of any abnormal situations during the upload, the JobContext's state will change to `UPLOAD_OBJECT_ERROR`.
+- After uploading all segments, Uploader inserts the segment data checksums and the root checksum into the SP DB.
+- Uploader creates an upload object task for Manager and returns a success message to the client indicating that the put object request is successful.
+
+The replicate to SecondarySP flow chart is shown below:
+
+### TaskExecutor
+
+- TaskExecutor requests tasks from the Manager, which then dispatches various job tasks such as ReplicatePieceTask, SealObjectTask, ChallengePieceTask, GCObjectTask, and so on.
+- The object data is asynchronously replicated to secondary SPs, and Uploader can quickly receive a success message from TaskExecutor. The JobContext's state changes from `UPLOAD_OBJECT_DONE` to `ALLOC_SECONDARY_DOING`.
+- TaskExecutor sends a GetSecondarySPApproval request to the P2P node, which broadcasts it to other SPs and collects the results back to the TaskExecutor for selecting the secondary SPs. The JobContext's state enters `ALLOC_SECONDARY_DONE`, and then immediately changes into `REPLICATE_OBJECT_DOING`.
+- TaskExecutor retrieves segments from the PieceStore in parallel and uses `Erasure Coding (EC)` to compute a data redundancy solution for these segments, generating the corresponding EC pieces. The EC pieces are then organized into six replicate data groups, with each group containing several EC pieces according to the Redundancy policy (a sketch of this EC step follows the list).
+- TaskExecutor then streams the replicate data groups to the selected secondary SPs in parallel.
+- The JobContext's secondary SP information is updated once the replication of a secondary SP is completed. The JobContext's state changes from `REPLICATE_OBJECT_DOING` to `REPLICATE_OBJECT_DONE` only after all secondary SPs have completed replication.
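+
+A minimal sketch of the EC step, using the `github.com/klauspost/reedsolomon` library. It only illustrates splitting one segment into pieces; the real executor additionally signs, streams and tracks each piece, and the 4+2 parameters here are illustrative.
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/reedsolomon"
+)
+
+func main() {
+	// A 4+2 scheme yields six EC pieces per segment, one per replicate
+	// data group / secondary SP.
+	enc, err := reedsolomon.New(4, 2)
+	if err != nil {
+		panic(err)
+	}
+	segment := bytes.Repeat([]byte("piece-data"), 1024) // one segment's payload
+
+	shards, err := enc.Split(segment) // 4 data shards + 2 empty parity shards
+	if err != nil {
+		panic(err)
+	}
+	if err = enc.Encode(shards); err != nil { // fill the 2 parity shards
+		panic(err)
+	}
+	for i, shard := range shards {
+		// Shard i would be stored/sent under a key like ECPieceKey(objectID, segmentIdx, i).
+		fmt.Printf("ec piece %d: %d bytes\n", i, len(shard))
+	}
+}
+```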
+
+### Receiver
+
+- Receiver checks whether the SecondarySP approval was signed by itself and whether it has timed out. If this check fails, it returns a `SIGNATURE_ERROR` to TaskExecutor.
+- Receiver runs in the secondary SP; it receives EC pieces that belong to the same replicate data group and uploads them to the secondary SP's PieceStore.
+- Receiver computes the integrity checksum of the EC pieces, signs it with the SP's approval private key, and returns these to TaskExecutor.
+
+### TaskExecutor
+
+- TaskExecutor receives the response from the secondary SPs' Receiver and verifies the signature against the secondary SP's approval public key.
+- TaskExecutor sends the MsgSealObject to the Signer for signing the seal object transaction and broadcasting it to the Greenfield chain with the secondary SPs' integrity hashes and signatures. The JobContext's state turns from `REPLICATE_OBJECT_DONE` to `SIGN_OBJECT_DOING`. If Signer succeeds in broadcasting the SealObjectTx, the state enters `SIGN_OBJECT_DONE` and then immediately changes into `SEAL_OBJECT_TX_DOING`.
+- TaskExecutor monitors the execution result of the seal object transaction on the Greenfield chain to determine whether the seal is successful. If so, the JobContext's state changes into `SEAL_OBJECT_DONE`.
+
+See request and response details for this API: [PutObject](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/put_object.html).
+
+## Download Object
+
+Users can download an object from the PrimarySP. The flow chart is shown below:
+
+### Gateway
+
+- Receives the GetObject request from the client.
+- Verifies the signature of the request to ensure that the request has not been tampered with.
+- Checks the authorization to ensure the corresponding account has permissions on resources.
+- Checks the object state and payment account state to ensure the object is sealed and the payment account is active.
+- Dispatches the request to Downloader.
+
+### Downloader
+
+- Receives the GetObject request from the Gateway service.
+- Checks whether the read traffic exceeds the quota.
+  * If it exceeds the quota, Downloader refuses to serve and returns a not-enough-quota error to the Gateway.
+  * If the quota is sufficient, Downloader inserts a read record into the SP traffic-db.
+- Downloader splits the GetObject request into GetPiece requests (which support range reads) to retrieve the corresponding piece payload data. Downloader then streams the object payload data back to the Gateway.
+
+See request and response details for this API: [GetObject](https://greenfield.bnbchain.org/docs/api-sdk/storgae-provider-rest/get_object.html).
+
+## QueryMeta
+
+Users may want to query metadata about buckets, objects, bucket read quota or bucket read records from SP. SP provides related APIs for querying metadata. The flow chart is shown below:
+
+### Gateway
+
+- Receives the QueryMeta request from the client.
+- Verifies the signature of the request to ensure that the request has not been tampered with.
+- Checks the authorization to ensure the corresponding account has permissions on resources.
+- Dispatches the request to Metadata.
+
+### Metadata
+
+- Metadata receives the QueryMeta request from Gateway.
+- Metadata queries the bucket or object from SP DB or BS DB.
+
+## Challenge Piece
+
+Ensuring data integrity and availability is always the top priority for any decentralized storage network. To achieve better high availability (HA), we use data challenges instead of storage proofs. The system continuously issues data challenges to random pieces on the Greenfield chain, and the SP that stores the challenged piece responds using the challenge workflow. Each SP splits the object payload data into segments, stores the segment data in the PieceStore, and stores the segment checksums in the SP DB.
+
+The flow chart is shown below:
+
+### Gateway
+
+- Receives the Challenge request from the client.
+- Verifies the signature of the request to ensure that the request has not been tampered with.
+- Checks the authorization to ensure the corresponding account has permissions on resources.
+- Dispatches the request to Downloader.
+
+### Downloader
+
+- Downloader receives the Challenge request from Gateway.
+- Downloader returns all segment data checksums and the challenged segment's payload to Gateway.
+  * Retrieves all segment data checksums from the SP DB.
+  * Gets the challenged segment data from PieceStore.
+
+## GC Object
+
+GC is used to delete objects whose metadata on the Greenfield chain has already been deleted, reducing the storage cost of each SP and the data size on the Greenfield chain. This function is executed automatically in Manager daemon mode.
+
+The flow chart is shown below:
+
+- Manager dispatches GCObjectTask to TaskExecutor.
+- TaskExecutor sends requests to Metadata to query deleted objects in order.
+- TaskExecutor deletes the payload data stored in PieceStore.
diff --git a/store/piecestore/README.md b/store/piecestore/README.md
index 135dc4403..a70543c64 100644
--- a/store/piecestore/README.md
+++ b/store/piecestore/README.md
@@ -10,8 +10,8 @@

 ## CheckList

-- [ ] s3: amazon S3
-- [ ] file: local file, using disk persistance
+- [ ] s3: AWS S3
+- [ ] file: local file, using disk persistence
 - [ ] memory: memory storage, if server reboot, no data in disk
 - [ ] minio: MinIO

@@ -66,4 +66,3 @@

 If BucketURL is configured in environment, all services will use the same bucket

 If `Shards` is not set in config.toml, the shard is 0, PieceStore won't shard.
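+
+The shard choice can be pictured as hashing the piece key into one of the shard buckets. This is only a sketch of the idea, not PieceStore's actual implementation:
+
+```go
+package main
+
+import (
+	"fmt"
+	"hash/fnv"
+)
+
+// shardBucket sketches shard selection: with shards == 0 the base bucket is
+// used as-is; otherwise the piece key hashes to one of the shard buckets.
+func shardBucket(base string, shards uint32, pieceKey string) string {
+	if shards == 0 {
+		return base
+	}
+	h := fnv.New32a()
+	h.Write([]byte(pieceKey))
+	return fmt.Sprintf("%s-%d", base, h.Sum32()%shards)
+}
+
+func main() {
+	fmt.Println(shardBucket("mybucket", 0, "s1024_s0")) // mybucket
+	fmt.Println(shardBucket("mybucket", 4, "s1024_s0")) // mybucket-0 .. mybucket-3
+}
+```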
> More storage providers will be supported - From 53ab18fbbf70bd12b5edd587ba629f2a59ba9d50 Mon Sep 17 00:00:00 2001 From: Alexxxxxx <118710506+alexgao001@users.noreply.github.com> Date: Tue, 20 Jun 2023 17:34:03 +0800 Subject: [PATCH 22/78] fix: broadcast tx bypass simulation and sync mode (#636) * fix: use fix gas to avoid simulation and sync mode for broadcasting tx * fix: lint --- base/gfspconfig/config.go | 11 +++++-- modular/signer/signer_client.go | 45 ++++++++++++++++++++--------- modular/signer/signer_options.go | 49 +++++++++++++++++++++++++++++--- 3 files changed, 84 insertions(+), 21 deletions(-) diff --git a/base/gfspconfig/config.go b/base/gfspconfig/config.go index 38819aeae..4a706a60c 100644 --- a/base/gfspconfig/config.go +++ b/base/gfspconfig/config.go @@ -84,9 +84,14 @@ func (cfg *GfSpConfig) String() string { } type ChainConfig struct { - ChainID string - ChainAddress []string - GasLimit uint64 + ChainID string + ChainAddress []string + SealGasLimit uint64 + SealFeeAmount uint64 + RejectSealGasLimit uint64 + RejectSealFeeAmount uint64 + DiscontinueBucketGasLimit uint64 + DiscontinueBucketFeeAmount uint64 } type SpAccountConfig struct { diff --git a/modular/signer/signer_client.go b/modular/signer/signer_client.go index fadc4a0ac..92f34a8cd 100644 --- a/modular/signer/signer_client.go +++ b/modular/signer/signer_client.go @@ -21,6 +21,8 @@ import ( // SignType is the type of msg signature type SignType string +type GasInfoType string + const ( // SignOperator is the type of signature signed by the operator account SignOperator SignType = "operator" @@ -39,20 +41,29 @@ const ( // BroadcastTxRetry defines the max retry for broadcasting tx on-chain BroadcastTxRetry = 3 + + Seal GasInfoType = "Seal" + RejectSeal GasInfoType = "RejectSeal" + DiscontinueBucket GasInfoType = "DiscontinueBucket" ) +type GasInfo struct { + GasLimit uint64 + FeeAmount sdk.Coins +} + // GreenfieldChainSignClient the greenfield chain client type GreenfieldChainSignClient struct { mu sync.Mutex - gasLimit uint64 + gasInfo map[GasInfoType]GasInfo greenfieldClients map[SignType]*client.GreenfieldClient sealAccNonce uint64 gcAccNonce uint64 } // NewGreenfieldChainSignClient return the GreenfieldChainSignClient instance -func NewGreenfieldChainSignClient(rpcAddr, chainID string, gasLimit uint64, operatorPrivateKey, fundingPrivateKey, +func NewGreenfieldChainSignClient(rpcAddr, chainID string, gasInfo map[GasInfoType]GasInfo, operatorPrivateKey, fundingPrivateKey, sealPrivateKey, approvalPrivateKey, gcPrivateKey string) (*GreenfieldChainSignClient, error) { // init clients // TODO: Get private key from KMS(AWS, GCP, Azure, Aliyun) @@ -127,7 +138,7 @@ func NewGreenfieldChainSignClient(rpcAddr, chainID string, gasLimit uint64, oper } return &GreenfieldChainSignClient{ - gasLimit: gasLimit, + gasInfo: gasInfo, greenfieldClients: greenfieldClients, sealAccNonce: sealAccNonce, gcAccNonce: gcAccNonce, @@ -195,7 +206,7 @@ func (client *GreenfieldChainSignClient) SealObject( msgSealObject := storagetypes.NewMsgSealObject(km.GetAddr(), sealObject.BucketName, sealObject.ObjectName, secondarySPAccs, sealObject.SecondarySpSignatures) - mode := tx.BroadcastMode_BROADCAST_MODE_ASYNC + mode := tx.BroadcastMode_BROADCAST_MODE_SYNC var ( resp *tx.BroadcastTxResponse @@ -205,9 +216,11 @@ func (client *GreenfieldChainSignClient) SealObject( nonce = client.sealAccNonce for i := 0; i < BroadcastTxRetry; i++ { txOpt := &ctypes.TxOption{ - Mode: &mode, - GasLimit: client.gasLimit, - Nonce: nonce, + NoSimulate: true, + Mode: 
&mode,
+			GasLimit:   client.gasInfo[Seal].GasLimit,
+			FeeAmount:  client.gasInfo[Seal].FeeAmount,
+			Nonce:      nonce,
 		}
 		resp, err = client.greenfieldClients[scope].BroadcastTx(ctx, []sdk.Msg{msgSealObject}, txOpt)
 		if err != nil {
@@ -268,7 +281,7 @@ func (client *GreenfieldChainSignClient) RejectUnSealObject(
 	defer client.mu.Unlock()

 	msgRejectUnSealObject := storagetypes.NewMsgRejectUnsealedObject(km.GetAddr(), rejectObject.GetBucketName(), rejectObject.GetObjectName())
-	mode := tx.BroadcastMode_BROADCAST_MODE_ASYNC
+	mode := tx.BroadcastMode_BROADCAST_MODE_SYNC

 	var (
 		resp *tx.BroadcastTxResponse
@@ -278,9 +291,11 @@ func (client *GreenfieldChainSignClient) RejectUnSealObject(
 	nonce = client.sealAccNonce
 	for i := 0; i < BroadcastTxRetry; i++ {
 		txOpt := &ctypes.TxOption{
-			Mode:      &mode,
-			GasLimit:  client.gasLimit,
-			Nonce:     nonce,
+			NoSimulate: true,
+			Mode:       &mode,
+			GasLimit:   client.gasInfo[RejectSeal].GasLimit,
+			FeeAmount:  client.gasInfo[RejectSeal].FeeAmount,
+			Nonce:      nonce,
 		}
 		resp, err = client.greenfieldClients[scope].BroadcastTx(ctx, []sdk.Msg{msgRejectUnSealObject}, txOpt)
 		if err != nil {
@@ -338,9 +353,11 @@ func (client *GreenfieldChainSignClient) DiscontinueBucket(ctx context.Context,
 		discontinueBucket.BucketName, discontinueBucket.Reason)
 	mode := tx.BroadcastMode_BROADCAST_MODE_SYNC
 	txOpt := &ctypes.TxOption{
-		Mode:      &mode,
-		GasLimit:  client.gasLimit,
-		Nonce:     nonce,
+		NoSimulate: true,
+		Mode:       &mode,
+		GasLimit:   client.gasInfo[DiscontinueBucket].GasLimit,
+		FeeAmount:  client.gasInfo[DiscontinueBucket].FeeAmount,
+		Nonce:      nonce,
 	}
 	resp, err := client.greenfieldClients[scope].BroadcastTx(ctx, []sdk.Msg{msgDiscontinueBucket}, txOpt)
diff --git a/modular/signer/signer_options.go b/modular/signer/signer_options.go
index e0c4c33e2..e02503484 100644
--- a/modular/signer/signer_options.go
+++ b/modular/signer/signer_options.go
@@ -4,14 +4,25 @@ import (
 	"fmt"
 	"os"

+	sdk "github.com/cosmos/cosmos-sdk/types"
+
 	"github.com/bnb-chain/greenfield-storage-provider/base/gfspapp"
 	"github.com/bnb-chain/greenfield-storage-provider/base/gfspconfig"
 	coremodule "github.com/bnb-chain/greenfield-storage-provider/core/module"
+	"github.com/bnb-chain/greenfield/sdk/types"
 )

 const (
-	// DefaultGasLimit defines the default gas limit
-	DefaultGasLimit = 210000
+	// Default fixed gas limits and fee amounts for the signer's transaction types
+	DefaultSealGasLimit = 1200 // fixed gas limit for MsgSealObject
+	DefaultSealFeeAmount = 6000000000000
+
+	DefaultRejectSealGasLimit = 12000 // fixed gas limit for MsgRejectSealObject
+	DefaultRejectSealFeeAmount = 60000000000000
+
+	DefaultDiscontinueBucketGasLimit = 2400 // fixed gas limit for MsgDiscontinueBucket
+	DefaultDiscontinueBucketFeeAmount = 12000000000000
+
 	// SpOperatorPrivKey defines env variable name for sp operator private key
 	SpOperatorPrivKey = "SIGNER_OPERATOR_PRIV_KEY"
 	// SpFundingPrivKey defines env variable name for sp funding private key
@@ -36,8 +47,23 @@ func DefaultSignerOptions(signer *SignModular, cfg *gfspconfig.GfSpConfig) error
 	if len(cfg.Chain.ChainAddress) == 0 {
 		return fmt.Errorf("chain address missing")
 	}
-	if cfg.Chain.GasLimit == 0 {
-		cfg.Chain.GasLimit = DefaultGasLimit
+	if cfg.Chain.SealGasLimit == 0 {
+		cfg.Chain.SealGasLimit = DefaultSealGasLimit
+	}
+	if cfg.Chain.SealFeeAmount == 0 {
+		cfg.Chain.SealFeeAmount = DefaultSealFeeAmount
+	}
+	if cfg.Chain.RejectSealGasLimit == 0 {
+		cfg.Chain.RejectSealGasLimit = DefaultRejectSealGasLimit
+	}
+	if cfg.Chain.RejectSealFeeAmount == 0 {
+		cfg.Chain.RejectSealFeeAmount = DefaultRejectSealFeeAmount
+	}
+	if cfg.Chain.DiscontinueBucketGasLimit == 0 {
+		cfg.Chain.DiscontinueBucketGasLimit = DefaultDiscontinueBucketGasLimit
+	}
+	if cfg.Chain.DiscontinueBucketFeeAmount == 0 {
+		cfg.Chain.DiscontinueBucketFeeAmount = DefaultDiscontinueBucketFeeAmount
 	}
 	if val, ok := os.LookupEnv(SpOperatorPrivKey); ok {
 		cfg.SpAccount.OperatorPrivateKey = val
@@ -54,8 +80,23 @@ func DefaultSignerOptions(signer *SignModular, cfg *gfspconfig.GfSpConfig) error
 	if val, ok := os.LookupEnv(SpGcPrivKey); ok {
 		cfg.SpAccount.GcPrivateKey = val
 	}
+
+	gasInfo := make(map[GasInfoType]GasInfo)
+	gasInfo[Seal] = GasInfo{
+		GasLimit:  cfg.Chain.SealGasLimit,
+		FeeAmount: sdk.NewCoins(sdk.NewCoin(types.Denom, sdk.NewInt(int64(cfg.Chain.SealFeeAmount)))),
+	}
+	gasInfo[RejectSeal] = GasInfo{
+		GasLimit:  cfg.Chain.RejectSealGasLimit,
+		FeeAmount: sdk.NewCoins(sdk.NewCoin(types.Denom, sdk.NewInt(int64(cfg.Chain.RejectSealFeeAmount)))),
+	}
+	gasInfo[DiscontinueBucket] = GasInfo{
+		GasLimit:  cfg.Chain.DiscontinueBucketGasLimit,
+		FeeAmount: sdk.NewCoins(sdk.NewCoin(types.Denom, sdk.NewInt(int64(cfg.Chain.DiscontinueBucketFeeAmount)))),
+	}
+
 	client, err := NewGreenfieldChainSignClient(cfg.Chain.ChainAddress[0], cfg.Chain.ChainID,
-		cfg.Chain.GasLimit, cfg.SpAccount.OperatorPrivateKey, cfg.SpAccount.FundingPrivateKey,
+		gasInfo, cfg.SpAccount.OperatorPrivateKey, cfg.SpAccount.FundingPrivateKey,
 		cfg.SpAccount.SealPrivateKey, cfg.SpAccount.ApprovalPrivateKey, cfg.SpAccount.GcPrivateKey)
 	if err != nil {
 		return err

From 8080529280664c1768507ba3adf2931bef94b589 Mon Sep 17 00:00:00 2001
From: joeycli
Date: Wed, 21 Jun 2023 14:08:50 +0800
Subject: [PATCH 23/78] fix: report task type check (#639)

* fix: report task type check

* fix: receive task retry

* feat: add upload event trace

* fix: error nil bug

* feat: add more executor event

* chore: change task timeout

* chore: change receive task priority
---
 base/gfspapp/manage_server.go         |  8 +++++
 base/gfspapp/task_options.go          | 20 ++++++------
 base/gfspapp/upload_server.go         |  4 +++
 base/gfspclient/manager.go            |  3 ++
 base/gfsptqueue/queue.go              |  3 ++
 base/gfsptqueue/queue_limit.go        |  3 ++
 core/spdb/spdb.go                     | 25 +++++++++++++++
 modular/executor/execute_replicate.go | 17 ++++++++++
 modular/executor/execute_task.go      | 46 ++++++++++++++++-----------
 modular/executor/executor.go          | 11 +++++++
 modular/gater/object_handler.go       |  4 +++
 modular/manager/manager.go            |  8 ++---
 store/sqldb/const.go                  |  2 ++
 store/sqldb/store.go                  |  4 +++
 store/sqldb/upload_event.go           | 19 +++++++++++
 store/sqldb/upload_event_schema.go    | 15 +++++++++
 16 files changed, 159 insertions(+), 33 deletions(-)
 create mode 100644 store/sqldb/upload_event.go
 create mode 100644 store/sqldb/upload_event_schema.go

diff --git a/base/gfspapp/manage_server.go b/base/gfspapp/manage_server.go
index 8bf3f1d02..3854cf870 100644
--- a/base/gfspapp/manage_server.go
+++ b/base/gfspapp/manage_server.go
@@ -9,6 +9,7 @@ import (
 	"github.com/bnb-chain/greenfield-storage-provider/base/types/gfspserver"
 	"github.com/bnb-chain/greenfield-storage-provider/base/types/gfsptask"
 	corercmgr "github.com/bnb-chain/greenfield-storage-provider/core/rcmgr"
+	corespdb "github.com/bnb-chain/greenfield-storage-provider/core/spdb"
 	coretask "github.com/bnb-chain/greenfield-storage-provider/core/task"
 	"github.com/bnb-chain/greenfield-storage-provider/pkg/log"
 	"github.com/bnb-chain/greenfield-storage-provider/pkg/metrics"
@@ -29,7 +30,12 @@ func (g *GfSpBaseApp) GfSpBeginTask(ctx context.Context, req *gfspserver.GfSpBeg
 	}
 	switch task := req.GetRequest().(type) {
 	case *gfspserver.GfSpBeginTaskRequest_UploadObjectTask:
+		
g.GfSpDB().InsertUploadEvent(task.UploadObjectTask.GetObjectInfo().Id.Uint64(), corespdb.ManagerReceiveAndWaitSchedulingTask, task.UploadObjectTask.Key().String()) err := g.OnBeginUploadObjectTask(ctx, task.UploadObjectTask) + if err != nil { + g.GfSpDB().InsertUploadEvent(task.UploadObjectTask.GetObjectInfo().Id.Uint64(), corespdb.ManagerReceiveAndWaitSchedulingTask, task.UploadObjectTask.Key().String()+":"+err.Error()) + } + g.GfSpDB().InsertUploadEvent(task.UploadObjectTask.GetObjectInfo().Id.Uint64(), corespdb.ManagerReceiveAndWaitSchedulingTask, task.UploadObjectTask.Key().String()+":") return &gfspserver.GfSpBeginTaskResponse{Err: gfsperrors.MakeGfSpError(err)}, nil default: return &gfspserver.GfSpBeginTaskResponse{Err: ErrUnsupportedTaskType}, nil @@ -68,11 +74,13 @@ func (g *GfSpBaseApp) GfSpAskTask(ctx context.Context, req *gfspserver.GfSpAskTa ReplicatePieceTask: t, } metrics.DispatchReplicatePieceTaskCounter.WithLabelValues(g.manager.Name()).Inc() + g.GfSpDB().InsertUploadEvent(t.GetObjectInfo().Id.Uint64(), corespdb.ManagerSchedulingTask, t.Key().String()) case *gfsptask.GfSpSealObjectTask: resp.Response = &gfspserver.GfSpAskTaskResponse_SealObjectTask{ SealObjectTask: t, } metrics.DispatchSealObjectTaskCounter.WithLabelValues(g.manager.Name()).Inc() + g.GfSpDB().InsertUploadEvent(t.GetObjectInfo().Id.Uint64(), corespdb.ManagerSchedulingTask, t.Key().String()) case *gfsptask.GfSpReceivePieceTask: resp.Response = &gfspserver.GfSpAskTaskResponse_ReceivePieceTask{ ReceivePieceTask: t, diff --git a/base/gfspapp/task_options.go b/base/gfspapp/task_options.go index 45495ddb4..712dae986 100644 --- a/base/gfspapp/task_options.go +++ b/base/gfspapp/task_options.go @@ -16,17 +16,17 @@ const ( // MaxUploadTime defines the max timeout to upload object. MaxUploadTime int64 = 300 // MinReplicateTime defines the min timeout to replicate object. - MinReplicateTime int64 = 15 + MinReplicateTime int64 = 90 // MaxReplicateTime defines the max timeout to replicate object. MaxReplicateTime int64 = 500 // MinReceiveTime defines the min timeout to confirm the received piece whether is sealed on greenfield. - MinReceiveTime int64 = 10 + MinReceiveTime int64 = 90 // MaxReceiveTime defines the max timeout to confirm the received piece whether is sealed on greenfield. - MaxReceiveTime int64 = 30 + MaxReceiveTime int64 = 300 // MinSealObjectTime defines the min timeout to seal object to greenfield. - MinSealObjectTime int64 = 2 + MinSealObjectTime int64 = 90 // MaxSealObjectTime defines the max timeout to seal object to greenfield. - MaxSealObjectTime int64 = 5 + MaxSealObjectTime int64 = 300 // MinDownloadTime defines the min timeout to download object. MinDownloadTime int64 = 2 // MaxDownloadTime defines the max timeout to download object. @@ -51,9 +51,9 @@ const ( // MaxReplicateRetry defines the max retry number to replicate object. MaxReplicateRetry = 6 // MinReceiveConfirmRetry defines the min retry number to confirm received piece is sealed on greenfield. - MinReceiveConfirmRetry = 20 + MinReceiveConfirmRetry = 0 // MaxReceiveConfirmRetry defines the max retry number to confirm received piece is sealed on greenfield. - MaxReceiveConfirmRetry = 60 + MaxReceiveConfirmRetry = 3 // MinSealObjectRetry defines the min retry number to seal object. MinSealObjectRetry = 3 // MaxSealObjectRetry defines the max retry number to seal object. 
@@ -235,11 +235,11 @@ func (g *GfSpBaseApp) TaskPriority(task coretask.Task) coretask.TPriority { case coretask.TypeTaskUpload: return coretask.UnSchedulingPriority case coretask.TypeTaskReplicatePiece: - return coretask.DefaultLargerTaskPriority - case coretask.TypeTaskReceivePiece: return coretask.MaxTaskPriority + case coretask.TypeTaskReceivePiece: + return coretask.DefaultSmallerPriority case coretask.TypeTaskSealObject: - return coretask.MaxTaskPriority + return coretask.DefaultLargerTaskPriority case coretask.TypeTaskDownloadObject: return coretask.UnSchedulingPriority case coretask.TypeTaskChallengePiece: diff --git a/base/gfspapp/upload_server.go b/base/gfspapp/upload_server.go index 62aa07ffb..43b3c1939 100644 --- a/base/gfspapp/upload_server.go +++ b/base/gfspapp/upload_server.go @@ -10,6 +10,7 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/base/types/gfspserver" "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsptask" "github.com/bnb-chain/greenfield-storage-provider/core/rcmgr" + corespdb "github.com/bnb-chain/greenfield-storage-provider/core/spdb" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" ) @@ -34,12 +35,14 @@ func (g *GfSpBaseApp) GfSpUploadObject(stream gfspserver.GfSpUploadService_GfSpU err error receiveSize int ) + defer func() { defer cancel() if span != nil { span.Done() } if task != nil { + g.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), corespdb.UploaderEndReceiveData, task.Key().String()) metrics.UploadObjectSizeHistogram.WithLabelValues(g.uploader.Name()).Observe( float64(task.GetObjectInfo().GetPayloadSize())) g.uploader.PostUploadObject(ctx, task) @@ -100,6 +103,7 @@ func (g *GfSpBaseApp) GfSpUploadObject(stream gfspserver.GfSpUploadService_GfSpU pWrite.CloseWithError(err) return } + g.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), corespdb.UploaderBeginReceiveData, task.Key().String()) ctx = log.WithValue(ctx, log.CtxKeyTask, task.Key().String()) span, err = g.uploader.ReserveResource(ctx, task.EstimateLimit().ScopeStat()) if err != nil { diff --git a/base/gfspclient/manager.go b/base/gfspclient/manager.go index 3f11e16b3..3d06ee5c7 100644 --- a/base/gfspclient/manager.go +++ b/base/gfspclient/manager.go @@ -117,6 +117,9 @@ func (s *GfSpClient) ReportTask(ctx context.Context, report coretask.Task) error req.Request = &gfspserver.GfSpReportTaskRequest_ChallengePieceTask{ ChallengePieceTask: t, } + default: + log.CtxErrorw(ctx, "unsupported task type to report") + return ErrTypeMismatch } resp, err := gfspserver.NewGfSpManageServiceClient(conn).GfSpReportTask(ctx, req) if err != nil { diff --git a/base/gfsptqueue/queue.go b/base/gfsptqueue/queue.go index 61ba91ffb..996390ba3 100644 --- a/base/gfsptqueue/queue.go +++ b/base/gfsptqueue/queue.go @@ -199,6 +199,9 @@ func (t *GfSpTQueue) top() coretask.Task { if index == len(backupTasks) { index = 0 } + if backupTasks[index] != nil { + t.current = backupTasks[index].GetCreateTime() + } return backupTasks[index] } diff --git a/base/gfsptqueue/queue_limit.go b/base/gfsptqueue/queue_limit.go index 9e54c8b82..70d284fe3 100644 --- a/base/gfsptqueue/queue_limit.go +++ b/base/gfsptqueue/queue_limit.go @@ -191,6 +191,9 @@ func (t *GfSpTQueueWithLimit) topByLimit(limit corercmgr.Limit) coretask.Task { if index == len(backupTasks) { index = 0 } + if backupTasks[index] != nil { + t.current = backupTasks[index].GetCreateTime() + } return backupTasks[index] } diff --git a/core/spdb/spdb.go 
b/core/spdb/spdb.go
index 11037e1ac..a145655e9 100644
--- a/core/spdb/spdb.go
+++ b/core/spdb/spdb.go
@@ -7,6 +7,29 @@ import (
 	sptypes "github.com/bnb-chain/greenfield/x/sp/types"
 )

+const (
+	GatewayBeginReceiveUpload           = "gateway_begin_receive_upload"
+	GatewayEndReceiveUpload             = "gateway_end_receive_upload"
+	UploaderBeginReceiveData            = "uploader_begin_receive_data"
+	UploaderEndReceiveData              = "uploader_end_receive_data"
+	ManagerReceiveAndWaitSchedulingTask = "manager_receive_and_wait_scheduling_task"
+	ManagerSchedulingTask               = "manager_scheduling_task"
+	ExecutorBeginTask                   = "executor_begin_task"
+	ExecutorEndTask                     = "executor_end_task"
+	ExecutorBeginP2P                    = "executor_begin_p2p"
+	ExecutorEndP2P                      = "executor_end_p2p"
+	ExecutorBeginReplicateOnePiece      = "executor_begin_replicate_one_piece"
+	ExecutorEndReplicateOnePiece        = "executor_end_replicate_one_piece"
+	ExecutorBeginReplicateAllPiece      = "executor_begin_replicate_all_piece"
+	ExecutorEndReplicateAllPiece        = "executor_end_replicate_all_piece"
+	ExecutorBeginDoneReplicatePiece     = "executor_begin_done_replicate_piece"
+	ExecutorEndDoneReplicatePiece       = "executor_end_done_replicate_piece"
+	ExecutorBeginSealTx                 = "executor_begin_seal_tx"
+	ExecutorEndSealTx                   = "executor_end_seal_tx"
+	ExecutorBeginConfirmSeal            = "executor_begin_confirm_seal"
+	ExecutorEndConfirmSeal              = "executor_end_confirm_seal"
+)
+
 // UploadObjectProgressDB interface which records upload object related progress(includes foreground and background) and state.
 type UploadObjectProgressDB interface {
 	// InsertUploadProgress inserts a new upload object progress.
@@ -23,6 +46,8 @@ type UploadObjectProgressDB interface {
 	// GetUploadMetasToSeal queries the latest replicate_done/seal_doing object to continue seal.
 	// It is only used in startup.
 	GetUploadMetasToSeal(limit int) ([]*UploadObjectMeta, error)
+	// InsertUploadEvent inserts a new upload event record.
+	InsertUploadEvent(objectID uint64, state string, description string) error
 }

 // GCObjectProgressDB interface which records gc object related progress. 
diff --git a/modular/executor/execute_replicate.go b/modular/executor/execute_replicate.go index 4e529ba08..0303832db 100644 --- a/modular/executor/execute_replicate.go +++ b/modular/executor/execute_replicate.go @@ -22,6 +22,11 @@ import ( storagetypes "github.com/bnb-chain/greenfield/x/storage/types" ) +//ExecutorBeginSealTx = "executor_begin_seal_tx" +//ExecutorEndSealTx = "executor_end_seal_tx" +//ExecutorBeginConfirmSeal = "executor_begin_confirm_seal" +//ExecutorEndConfirmSeal = "executor_end_confirm_seal" + func (e *ExecuteModular) HandleReplicatePieceTask(ctx context.Context, task coretask.ReplicatePieceTask) { var ( err error @@ -43,22 +48,28 @@ func (e *ExecuteModular) HandleReplicatePieceTask(ctx context.Context, task core rAppTask.InitApprovalReplicatePieceTask(task.GetObjectInfo(), task.GetStorageParams(), e.baseApp.TaskPriority(rAppTask), e.baseApp.OperatorAddress()) askReplicateApprovalTime := time.Now() + e.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), spdb.ExecutorBeginP2P, task.Key().String()) approvals, err = e.AskReplicatePieceApproval(ctx, rAppTask, int(low), int(high), e.askReplicateApprovalTimeout) metrics.PerfUploadTimeHistogram.WithLabelValues("background_ask_p2p_approval_time").Observe(time.Since(askReplicateApprovalTime).Seconds()) metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_p2p_end_time").Observe(time.Since(startReplicateTime).Seconds()) if err != nil { + e.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), spdb.ExecutorEndP2P, task.Key().String()+":"+err.Error()) log.CtxErrorw(ctx, "failed get approvals", "error", err) return } + e.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), spdb.ExecutorEndP2P, task.Key().String()) replicatePieceTotalTime := time.Now() + e.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), spdb.ExecutorBeginReplicateAllPiece, task.Key().String()) err = e.handleReplicatePiece(ctx, task, approvals) metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_object_time").Observe(time.Since(replicatePieceTotalTime).Seconds()) metrics.PerfUploadTimeHistogram.WithLabelValues("background_task_replicate_object_end_time").Observe(time.Since(startReplicateTime).Seconds()) if err != nil { + e.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), spdb.ExecutorEndReplicateAllPiece, task.Key().String()+":"+err.Error()) log.CtxErrorw(ctx, "failed to replicate piece", "error", err) return } + e.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), spdb.ExecutorEndReplicateAllPiece, task.Key().String()) log.CtxDebugw(ctx, "succeed to replicate all pieces") // combine seal object sealMsg := &storagetypes.MsgSealObject{ @@ -258,15 +269,18 @@ func (e *ExecuteModular) doReplicatePiece(ctx context.Context, waitGroup *sync.W } receive.SetSignature(signature) replicateOnePieceTime := time.Now() + e.baseApp.GfSpDB().InsertUploadEvent(rTask.GetObjectInfo().Id.Uint64(), spdb.ExecutorBeginReplicateOnePiece, receive.Info()) err = e.baseApp.GfSpClient().ReplicatePieceToSecondary(ctx, approval.GetApprovedSpEndpoint(), approval, receive, data) metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_one_piece_time").Observe(time.Since(replicateOnePieceTime).Seconds()) metrics.PerfUploadTimeHistogram.WithLabelValues("background_replicate_one_piece_end_time").Observe(time.Since(startTime).Seconds()) if err != nil { + e.baseApp.GfSpDB().InsertUploadEvent(rTask.GetObjectInfo().Id.Uint64(), 
spdb.ExecutorEndReplicateOnePiece, receive.Info()+":"+err.Error()) log.CtxErrorw(ctx, "failed to replicate piece", "replicate_idx", replicateIdx, "piece_idx", pieceIdx, "error", err) return } + e.baseApp.GfSpDB().InsertUploadEvent(rTask.GetObjectInfo().Id.Uint64(), spdb.ExecutorEndReplicateOnePiece, receive.Info()) log.CtxDebugw(ctx, "success to replicate piece", "replicate_idx", replicateIdx, "piece_idx", pieceIdx) return @@ -294,16 +308,19 @@ func (e *ExecuteModular) doneReplicatePiece(ctx context.Context, rTask coretask. } receive.SetSignature(taskSignature) doneReplicateTime := time.Now() + e.baseApp.GfSpDB().InsertUploadEvent(rTask.GetObjectInfo().Id.Uint64(), spdb.ExecutorBeginDoneReplicatePiece, receive.Info()) integrity, signature, err = e.baseApp.GfSpClient().DoneReplicatePieceToSecondary(ctx, approval.GetApprovedSpEndpoint(), approval, receive) metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_receive_http_time").Observe(time.Since(doneReplicateTime).Seconds()) metrics.PerfUploadTimeHistogram.WithLabelValues("background_done_receive_http_end_time").Observe(time.Since(signTime).Seconds()) if err != nil { + e.baseApp.GfSpDB().InsertUploadEvent(rTask.GetObjectInfo().Id.Uint64(), spdb.ExecutorEndDoneReplicatePiece, receive.Info()+":"+err.Error()) log.CtxErrorw(ctx, "failed to done replicate piece", "endpoint", approval.GetApprovedSpEndpoint(), "replicate_idx", replicateIdx, "error", err) return nil, nil, err } + e.baseApp.GfSpDB().InsertUploadEvent(rTask.GetObjectInfo().Id.Uint64(), spdb.ExecutorEndDoneReplicatePiece, receive.Info()) if int(replicateIdx+1) >= len(rTask.GetObjectInfo().GetChecksums()) { log.CtxErrorw(ctx, "failed to done replicate piece, replicate idx out of bounds", "replicate_idx", replicateIdx, diff --git a/modular/executor/execute_task.go b/modular/executor/execute_task.go index 5403130d9..b690c4d70 100644 --- a/modular/executor/execute_task.go +++ b/modular/executor/execute_task.go @@ -9,6 +9,7 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsperrors" "github.com/bnb-chain/greenfield-storage-provider/core/module" + "github.com/bnb-chain/greenfield-storage-provider/core/spdb" coretask "github.com/bnb-chain/greenfield-storage-provider/core/task" "github.com/bnb-chain/greenfield-storage-provider/modular/manager" "github.com/bnb-chain/greenfield-storage-provider/modular/metadata/types" @@ -48,12 +49,15 @@ func (e *ExecuteModular) HandleSealObjectTask(ctx context.Context, task coretask func (e *ExecuteModular) sealObject(ctx context.Context, task coretask.ObjectTask, sealMsg *storagetypes.MsgSealObject) error { var err error for retry := int64(0); retry <= task.GetMaxRetry(); retry++ { + e.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), spdb.ExecutorBeginSealTx, task.Key().String()) err = e.baseApp.GfSpClient().SealObject(ctx, sealMsg) if err != nil { log.CtxErrorw(ctx, "failed to seal object", "retry", retry, "max_retry", task.GetMaxRetry(), "error", err) + e.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), spdb.ExecutorEndSealTx, task.Key().String()+":"+err.Error()) time.Sleep(time.Duration(e.listenSealRetryTimeout) * time.Second) } else { + e.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), spdb.ExecutorEndSealTx, task.Key().String()) break } } @@ -69,9 +73,11 @@ func (e *ExecuteModular) sealObject(ctx context.Context, task coretask.ObjectTas func (e *ExecuteModular) listenSealObject(ctx context.Context, object *storagetypes.ObjectInfo) error { var err error for 
retry := 0; retry < e.maxListenSealRetry; retry++ { + e.baseApp.GfSpDB().InsertUploadEvent(object.Id.Uint64(), spdb.ExecutorBeginConfirmSeal, "") sealed, innerErr := e.baseApp.Consensus().ListenObjectSeal(ctx, object.Id.Uint64(), e.listenSealTimeoutHeight) if innerErr != nil { + e.baseApp.GfSpDB().InsertUploadEvent(object.Id.Uint64(), spdb.ExecutorEndConfirmSeal, "err:"+innerErr.Error()) log.CtxErrorw(ctx, "failed to listen object seal", "retry", retry, "max_retry", e.maxListenSealRetry, "error", err) time.Sleep(time.Duration(e.listenSealRetryTimeout) * time.Second) @@ -79,11 +85,13 @@ func (e *ExecuteModular) listenSealObject(ctx context.Context, object *storagety continue } if !sealed { + e.baseApp.GfSpDB().InsertUploadEvent(object.Id.Uint64(), spdb.ExecutorEndConfirmSeal, "unsealed") log.CtxErrorw(ctx, "failed to seal object on chain", "retry", retry, "max_retry", e.maxListenSealRetry, "error", err) err = ErrUnsealed continue } + e.baseApp.GfSpDB().InsertUploadEvent(object.Id.Uint64(), spdb.ExecutorEndConfirmSeal, "sealed") err = nil break } @@ -134,25 +142,25 @@ func (e *ExecuteModular) HandleReceivePieceTask(ctx context.Context, task coreta "current", e.baseApp.OperatorAddress()) task.SetError(ErrSecondaryMismatch) // TODO:: gc zombie task will gc the zombie piece, it is a conservative plan - //err = e.baseApp.GfSpDB().DeleteObjectIntegrity(task.GetObjectInfo().Id.Uint64()) - //if err != nil { - // log.CtxErrorw(ctx, "failed to delete integrity") - //} - //var pieceKey string - //segmentCount := e.baseApp.PieceOp().SegmentPieceCount(onChainObject.GetPayloadSize(), - // task.GetStorageParams().GetMaxPayloadSize()) - //for i := uint32(0); i < segmentCount; i++ { - // if task.GetObjectInfo().GetRedundancyType() == storagetypes.REDUNDANCY_EC_TYPE { - // pieceKey = e.baseApp.PieceOp().ECPieceKey(onChainObject.Id.Uint64(), - // i, task.GetReplicateIdx()) - // } else { - // pieceKey = e.baseApp.PieceOp().SegmentPieceKey(onChainObject.Id.Uint64(), i) - // } - // err = e.baseApp.PieceStore().DeletePiece(ctx, pieceKey) - // if err != nil { - // log.CtxErrorw(ctx, "failed to delete piece data", "piece_key", pieceKey) - // } - //} + err = e.baseApp.GfSpDB().DeleteObjectIntegrity(task.GetObjectInfo().Id.Uint64()) + if err != nil { + log.CtxErrorw(ctx, "failed to delete integrity") + } + var pieceKey string + segmentCount := e.baseApp.PieceOp().SegmentPieceCount(onChainObject.GetPayloadSize(), + task.GetStorageParams().GetMaxPayloadSize()) + for i := uint32(0); i < segmentCount; i++ { + if task.GetObjectInfo().GetRedundancyType() == storagetypes.REDUNDANCY_EC_TYPE { + pieceKey = e.baseApp.PieceOp().ECPieceKey(onChainObject.Id.Uint64(), + i, task.GetReplicateIdx()) + } else { + pieceKey = e.baseApp.PieceOp().SegmentPieceKey(onChainObject.Id.Uint64(), i) + } + err = e.baseApp.PieceStore().DeletePiece(ctx, pieceKey) + if err != nil { + log.CtxErrorw(ctx, "failed to delete piece data", "piece_key", pieceKey) + } + } return } log.CtxDebugw(ctx, "succeed to handle confirm receive piece task") diff --git a/modular/executor/executor.go b/modular/executor/executor.go index e1f7c4929..41b19ddfb 100644 --- a/modular/executor/executor.go +++ b/modular/executor/executor.go @@ -12,6 +12,7 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/base/types/gfsptask" "github.com/bnb-chain/greenfield-storage-provider/core/module" corercmgr "github.com/bnb-chain/greenfield-storage-provider/core/rcmgr" + corespdb "github.com/bnb-chain/greenfield-storage-provider/core/spdb" coretask 
"github.com/bnb-chain/greenfield-storage-provider/core/task" "github.com/bnb-chain/greenfield-storage-provider/pkg/log" "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" @@ -147,12 +148,22 @@ func (e *ExecuteModular) AskTask(ctx context.Context) error { atomic.AddInt64(&e.doingReplicatePieceTaskCnt, 1) defer atomic.AddInt64(&e.doingReplicatePieceTaskCnt, -1) metrics.PerfUploadTimeHistogram.WithLabelValues("background_schedule_replicate_time").Observe(time.Since(time.Unix(t.GetCreateTime(), 0)).Seconds()) + e.baseApp.GfSpDB().InsertUploadEvent(t.GetObjectInfo().Id.Uint64(), corespdb.ExecutorBeginTask, t.Key().String()) e.HandleReplicatePieceTask(ctx, t) + if t.Error() != nil { + e.baseApp.GfSpDB().InsertUploadEvent(t.GetObjectInfo().Id.Uint64(), corespdb.ExecutorEndTask, t.Key().String()+":"+t.Error().Error()) + } + e.baseApp.GfSpDB().InsertUploadEvent(t.GetObjectInfo().Id.Uint64(), corespdb.ExecutorEndTask, t.Key().String()) case *gfsptask.GfSpSealObjectTask: metrics.ExecutorSealObjectTaskCounter.WithLabelValues(e.Name()).Inc() atomic.AddInt64(&e.doingSpSealObjectTaskCnt, 1) defer atomic.AddInt64(&e.doingSpSealObjectTaskCnt, -1) + e.baseApp.GfSpDB().InsertUploadEvent(t.GetObjectInfo().Id.Uint64(), corespdb.ExecutorBeginTask, t.Key().String()) e.HandleSealObjectTask(ctx, t) + if t.Error() != nil { + e.baseApp.GfSpDB().InsertUploadEvent(t.GetObjectInfo().Id.Uint64(), corespdb.ExecutorEndTask, t.Key().String()+":"+t.Error().Error()) + } + e.baseApp.GfSpDB().InsertUploadEvent(t.GetObjectInfo().Id.Uint64(), corespdb.ExecutorEndTask, t.Key().String()) case *gfsptask.GfSpReceivePieceTask: metrics.ExecutorReceiveTaskCounter.WithLabelValues(e.Name()).Inc() atomic.AddInt64(&e.doingReceivePieceTaskCnt, 1) diff --git a/modular/gater/object_handler.go b/modular/gater/object_handler.go index 05fa433cc..8c9893563 100644 --- a/modular/gater/object_handler.go +++ b/modular/gater/object_handler.go @@ -9,6 +9,7 @@ import ( "strings" "time" + corespdb "github.com/bnb-chain/greenfield-storage-provider/core/spdb" "github.com/bnb-chain/greenfield-storage-provider/modular/downloader" "github.com/bnb-chain/greenfield-storage-provider/pkg/metrics" "github.com/bnb-chain/greenfield/types/s3util" @@ -91,10 +92,13 @@ func (g *GateModular) putObjectHandler(w http.ResponseWriter, r *http.Request) { task := &gfsptask.GfSpUploadObjectTask{} task.InitUploadObjectTask(objectInfo, params, g.baseApp.TaskTimeout(task, objectInfo.GetPayloadSize())) ctx := log.WithValue(reqCtx.Context(), log.CtxKeyTask, task.Key().String()) + g.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), corespdb.GatewayBeginReceiveUpload, task.Key().String()) err = g.baseApp.GfSpClient().UploadObject(ctx, task, r.Body) if err != nil { + g.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), corespdb.GatewayEndReceiveUpload, task.Key().String()+":"+err.Error()) log.CtxErrorw(ctx, "failed to upload payload data", "error", err) } + g.baseApp.GfSpDB().InsertUploadEvent(task.GetObjectInfo().Id.Uint64(), corespdb.GatewayEndReceiveUpload, task.Key().String()) log.CtxDebugw(ctx, "succeed to upload payload data") } diff --git a/modular/manager/manager.go b/modular/manager/manager.go index ab2c6e7e4..27f5a4597 100644 --- a/modular/manager/manager.go +++ b/modular/manager/manager.go @@ -97,10 +97,10 @@ func (m *ManageModular) Start(ctx context.Context) error { return err } m.scope = scope - err = m.LoadTaskFromDB() - if err != nil { - return err - } + //err = m.LoadTaskFromDB() + //if err != nil { + // return err + //} 
 	go m.eventLoop(ctx)
 	return nil
diff --git a/store/sqldb/const.go b/store/sqldb/const.go
index 669f60320..4e0c2a9f1 100644
--- a/store/sqldb/const.go
+++ b/store/sqldb/const.go
@@ -22,4 +22,6 @@ const (
 	ServiceConfigTableName = "service_config"
 	// OffChainAuthKeyTableName defines the off chain auth key table name.
 	OffChainAuthKeyTableName = "off_chain_auth_key"
+	// UploadEventTableName defines the upload event table name.
+	UploadEventTableName = "upload_event"
 )
diff --git a/store/sqldb/store.go b/store/sqldb/store.go
index b4bc4b2fa..6e215e85f 100644
--- a/store/sqldb/store.go
+++ b/store/sqldb/store.go
@@ -74,6 +74,10 @@ func InitDB(config *config.SQLDBConfig) (*gorm.DB, error) {
 		log.Errorw("failed to upload object progress table", "error", err)
 		return nil, err
 	}
+	if err = db.AutoMigrate(&UploadEventTable{}); err != nil {
+		log.Errorw("failed to create upload event table", "error", err)
+		return nil, err
+	}
 	if err = db.AutoMigrate(&GCObjectProgressTable{}); err != nil {
 		log.Errorw("failed to gc object progress table", "error", err)
 		return nil, err
 	}
diff --git a/store/sqldb/upload_event.go b/store/sqldb/upload_event.go
new file mode 100644
index 000000000..49b93bfe3
--- /dev/null
+++ b/store/sqldb/upload_event.go
@@ -0,0 +1,19 @@
+package sqldb
+
+import (
+	"fmt"
+	"time"
+)
+
+func (s *SpDBImpl) InsertUploadEvent(objectID uint64, state string, description string) error {
+	updateTime := time.Now().String()
+	if result := s.db.Create(&UploadEventTable{
+		ObjectID:    objectID,
+		UploadState: state,
+		Description: description,
+		UpdateTime:  updateTime,
+	}); result.Error != nil || result.RowsAffected != 1 {
+		return fmt.Errorf("failed to insert upload event record: %s", result.Error)
+	}
+	return nil
+}
diff --git a/store/sqldb/upload_event_schema.go b/store/sqldb/upload_event_schema.go
new file mode 100644
index 000000000..6c3c1c036
--- /dev/null
+++ b/store/sqldb/upload_event_schema.go
@@ -0,0 +1,15 @@
+package sqldb
+
+// UploadEventTable table schema.
+type UploadEventTable struct {
+	ID          uint64 `gorm:"primary_key;autoIncrement"`
+	ObjectID    uint64
+	UploadState string
+	Description string
+	UpdateTime  string
+}
+
+// TableName is used to set UploadEventTable Schema's table name in database. 
+func (UploadEventTable) TableName() string { + return UploadEventTableName +} From 3963a1d414d1f2d0c0728baafbf2e206e63320e1 Mon Sep 17 00:00:00 2001 From: yutianwu Date: Wed, 21 Jun 2023 18:18:54 +0800 Subject: [PATCH 24/78] feat: add prefix for segment and ec piece key (#635) * feat: add prefix for segment and ec piece key * fix: reduce prefix length --- base/gfsppieceop/pieceop.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/gfsppieceop/pieceop.go b/base/gfsppieceop/pieceop.go index 8440babf4..ba4297756 100644 --- a/base/gfsppieceop/pieceop.go +++ b/base/gfsppieceop/pieceop.go @@ -12,11 +12,11 @@ type GfSpPieceOp struct { } func (p *GfSpPieceOp) SegmentPieceKey(objectID uint64, segmentIdx uint32) string { - return fmt.Sprintf("%d_s%d", objectID, segmentIdx) + return fmt.Sprintf("s%d_s%d", objectID, segmentIdx) } func (p *GfSpPieceOp) ECPieceKey(objectID uint64, segmentIdx uint32, replicateIdx uint32) string { - return fmt.Sprintf("%d_s%d_p%d", objectID, segmentIdx, replicateIdx) + return fmt.Sprintf("e%d_s%d_p%d", objectID, segmentIdx, replicateIdx) } func (p *GfSpPieceOp) ChallengePieceKey(objectID uint64, segmentIdx uint32, replicateIdx int32) string { From e8154bb42292769f37e18f90c15b7cd9c249807a Mon Sep 17 00:00:00 2001 From: joeycli Date: Sun, 25 Jun 2023 12:51:30 +0800 Subject: [PATCH 25/78] feat: change approval key (#653) --- base/types/gfsptask/approval.go | 7 +++++-- base/types/gfsptask/task_key.go | 8 ++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/base/types/gfsptask/approval.go b/base/types/gfsptask/approval.go index b9ca16125..4cf364f5c 100644 --- a/base/types/gfsptask/approval.go +++ b/base/types/gfsptask/approval.go @@ -1,6 +1,7 @@ package gfsptask import ( + "encoding/hex" "fmt" "time" @@ -26,7 +27,8 @@ func (m *GfSpCreateBucketApprovalTask) InitApprovalCreateBucketTask(bucket *stor } func (m *GfSpCreateBucketApprovalTask) Key() coretask.TKey { - return GfSpCreateBucketApprovalTaskKey(m.GetCreateBucketInfo().GetBucketName()) + return GfSpCreateBucketApprovalTaskKey(m.GetCreateBucketInfo().GetBucketName(), + hex.EncodeToString(m.GetCreateBucketInfo().GetSignBytes())) } func (m *GfSpCreateBucketApprovalTask) Type() coretask.TType { @@ -151,7 +153,8 @@ func (m *GfSpCreateObjectApprovalTask) InitApprovalCreateObjectTask( func (m *GfSpCreateObjectApprovalTask) Key() coretask.TKey { return GfSpCreateObjectApprovalTaskKey( m.GetCreateObjectInfo().GetBucketName(), - m.GetCreateObjectInfo().GetObjectName()) + m.GetCreateObjectInfo().GetObjectName(), + hex.EncodeToString(m.GetCreateObjectInfo().GetSignBytes())) } func (m *GfSpCreateObjectApprovalTask) Type() coretask.TType { diff --git a/base/types/gfsptask/task_key.go b/base/types/gfsptask/task_key.go index e68b11c2a..a68abcaf2 100644 --- a/base/types/gfsptask/task_key.go +++ b/base/types/gfsptask/task_key.go @@ -27,13 +27,13 @@ var ( KeyPrefixGfSpGfSpGCMetaTask = strings.ToLower("GCMeta") ) -func GfSpCreateBucketApprovalTaskKey(bucket string) task.TKey { - return task.TKey(KeyPrefixGfSpCreateBucketApprovalTask + CombineKey("bucket:"+bucket)) +func GfSpCreateBucketApprovalTaskKey(bucket string, createBucketHash string) task.TKey { + return task.TKey(KeyPrefixGfSpCreateBucketApprovalTask + CombineKey("bucket:"+bucket, "hash:"+createBucketHash)) } -func GfSpCreateObjectApprovalTaskKey(bucket, object string) task.TKey { +func GfSpCreateObjectApprovalTaskKey(bucket, object string, createObjectHash string) task.TKey { return task.TKey(KeyPrefixGfSpCreateObjectApprovalTask + - 
CombineKey("bucket:"+bucket, "object:"+object)) + CombineKey("bucket:"+bucket, "object:"+object, "hash:"+createObjectHash)) } func GfSpReplicatePieceApprovalTaskKey(bucket, object, id string) task.TKey { From e6ba1c5f2693439ca8fd5329650178bb6bab221d Mon Sep 17 00:00:00 2001 From: ruojunm <46366167+ruojunm@users.noreply.github.com> Date: Sun, 25 Jun 2023 18:01:42 +0800 Subject: [PATCH 26/78] feat: built-in dapp for private object downloading (#656) --- modular/gater/const.go | 4 +- modular/gater/errors.go | 1 + modular/gater/object_handler.go | 79 +++++++++++++++++++++++++-------- 3 files changed, 64 insertions(+), 20 deletions(-) diff --git a/modular/gater/const.go b/modular/gater/const.go index 418a0f39a..343b98bdb 100644 --- a/modular/gater/const.go +++ b/modular/gater/const.go @@ -134,9 +134,11 @@ const ( GnfdResponseXMLVersion = "1.0" // GnfdBuiltInUniversalEndpointDappHtml a html code which works as a dapp, help users sign with their wallet. - GnfdBuiltInUniversalEndpointDappHtml = "\n\n\n\n \n BNB Greenfield\n \n \n \n \n \n \n \n \n \n\n\n\n
\n \n\n\n" + GnfdBuiltInUniversalEndpointDappHtml = "BNB Greenfield
" // GnfdBuiltInDappSignedContentTemplate defines the template which users will sign against with their wallet. GnfdBuiltInDappSignedContentTemplate = "Sign this message to access the file:\n%s\nThis signature will not cost you any fees.\nExpiration Time: %s" + // GnfdBuiltInUniversalEndpointDappErrorPage html code for an error page representing the error message to end users + GnfdBuiltInUniversalEndpointDappErrorPage = "BNB Greenfield
" // off-chain-auth headers diff --git a/modular/gater/errors.go b/modular/gater/errors.go index 9c1d2e637..7d9312b41 100644 --- a/modular/gater/errors.go +++ b/modular/gater/errors.go @@ -38,6 +38,7 @@ var ( ErrInvalidExpiryDate = gfsperrors.Register(module.GateModularName, http.StatusBadRequest, 50024, "The expiry parameter is incorrect. "+ "The expiry date is expected to be within "+strconv.Itoa(int(MaxExpiryAgeInSec))+" seconds and formatted in YYYY-DD-MM HH:MM:SS 'GMT'Z, e.g. 2023-04-20 16:34:12 GMT+08:00 . ") ErrNoSuchObject = gfsperrors.Register(module.AuthenticationModularName, http.StatusNotFound, 50025, "no such object") + ErrForbidden = gfsperrors.Register(module.GateModularName, http.StatusForbidden, 50026, "Forbidden to access") ErrConsensus = gfsperrors.Register(module.GateModularName, http.StatusBadRequest, 55001, "server slipped away, try again later") diff --git a/modular/gater/object_handler.go b/modular/gater/object_handler.go index 8c9893563..adf4a9519 100644 --- a/modular/gater/object_handler.go +++ b/modular/gater/object_handler.go @@ -359,27 +359,50 @@ func (g *GateModular) queryUploadProgressHandler(w http.ResponseWriter, r *http. // getObjectByUniversalEndpointHandler handles the get object request sent by universal endpoint func (g *GateModular) getObjectByUniversalEndpointHandler(w http.ResponseWriter, r *http.Request, isDownload bool) { var ( - err error - reqCtx *RequestContext - authenticated bool - isRange bool - rangeStart int64 - rangeEnd int64 - redirectUrl string - params *storagetypes.Params - escapedObjectName string + err error + reqCtx *RequestContext + authenticated bool + isRange bool + rangeStart int64 + rangeEnd int64 + redirectURL string + params *storagetypes.Params + escapedObjectName string + isRequestFromBrowser bool ) defer func() { reqCtx.Cancel() if err != nil { - reqCtx.SetError(gfsperrors.MakeGfSpError(err)) - reqCtx.SetHttpCode(int(gfsperrors.MakeGfSpError(err).GetHttpStatusCode())) - MakeErrorResponse(w, gfsperrors.MakeGfSpError(err)) + if isRequestFromBrowser { + reqCtx.SetHttpCode(http.StatusOK) + errorCodeForPage := "INTERNAL_ERROR" // default errorCode in built-in error page + switch err { + case downloader.ErrExceedBucketQuota: + errorCodeForPage = "NO_ENOUGH_QUOTA" + case ErrNoSuchObject: + errorCodeForPage = "FILE_NOT_FOUND" + case ErrForbidden: + errorCodeForPage = "NO_PERMISSION" + } + html := strings.Replace(GnfdBuiltInUniversalEndpointDappErrorPage, "<% errorCode %>", errorCodeForPage, 1) + + fmt.Fprintf(w, "%s", html) + return + } else { + reqCtx.SetError(gfsperrors.MakeGfSpError(err)) + reqCtx.SetHttpCode(int(gfsperrors.MakeGfSpError(err).GetHttpStatusCode())) + MakeErrorResponse(w, gfsperrors.MakeGfSpError(err)) + } + } else { reqCtx.SetHttpCode(http.StatusOK) } log.CtxDebugw(reqCtx.Context(), reqCtx.String()) }() + + userAgent := r.Header.Get("User-Agent") + isRequestFromBrowser = checkIfRequestFromBrowser(userAgent) + // ignore the error, because the universal endpoint does not need signature reqCtx, _ = NewRequestContext(r, g) @@ -420,10 +443,10 @@ func (g *GateModular) getObjectByUniversalEndpointHandler(w http.ResponseWriter, return } - redirectUrl = spEndpoint + r.RequestURI - log.Debugw("getting redirect url:", "redirectUrl", redirectUrl) + redirectURL = spEndpoint + r.RequestURI + log.Debugw("getting redirect url:", "redirectURL", redirectURL) - http.Redirect(w, r, redirectUrl, 302) + http.Redirect(w, r, redirectURL, 302) return } @@ -489,16 +512,21 @@ func (g *GateModular) getObjectByUniversalEndpointHandler(w 
http.ResponseWriter,
 			coremodule.AuthOpTypeGetObject, reqCtx.Account(), reqCtx.bucketName, reqCtx.objectName)
 		if err != nil {
 			log.CtxErrorw(reqCtx.Context(), "failed to verify authentication", "error", err)
+			err = ErrForbidden
 			return
 		}
 		if !authenticated {
 			log.CtxErrorw(reqCtx.Context(), "no permission to operate")
-			err = ErrNoPermission
+			err = ErrForbidden
 			return
 		}
 	} else {
-		// return a built-in dapp for users to make the signature
+		if !isRequestFromBrowser {
+			err = ErrForbidden
+			return
+		}
+		// if the request comes from a browser, we will return a built-in dapp for users to make the signature
 		var htmlConfigMap = map[string]string{
 			"greenfield_7971-1": "{\n \"envType\": \"dev\",\n \"signedMsg\": \"Sign this message to access the file:\\n$1\\nThis signature will not cost you any fees.\\nExpiration Time: $2\",\n \"chainId\": 7971,\n \"chainName\": \"dev - greenfield\",\n \"rpcUrls\": [\"https://gnfd-dev.qa.bnbchain.world\"],\n \"nativeCurrency\": { \"name\": \"BNB\", \"symbol\": \"BNB\", \"decimals\": 18 },\n \"blockExplorerUrls\": [\"https://greenfieldscan-qanet.fe.nodereal.cc/\"]\n}\n",
 			"greenfield_9000-1741": "{\n \"envType\": \"qa\",\n \"signedMsg\": \"Sign this message to access the file:\\n$1\\nThis signature will not cost you any fees.\\nExpiration Time: $2\",\n \"chainId\": 9000,\n \"chainName\": \"qa - greenfield\",\n \"rpcUrls\": [\"https://gnfd.qa.bnbchain.world\"],\n \"nativeCurrency\": { \"name\": \"BNB\", \"symbol\": \"BNB\", \"decimals\": 18 },\n \"blockExplorerUrls\": [\"https://greenfieldscan-qanet.fe.nodereal.cc/\"]\n}\n",
@@ -541,8 +569,9 @@ func (g *GateModular) getObjectByUniversalEndpointHandler(w http.ResponseWriter,
 	task := &gfsptask.GfSpDownloadObjectTask{}
 	task.InitDownloadObjectTask(getObjectInfoRes.GetObjectInfo(), getBucketInfoRes.GetBucketInfo(), params,
 		g.baseApp.TaskPriority(task), reqCtx.Account(), low, high, g.baseApp.TaskTimeout(task, uint64(high-low+1)),
 		g.baseApp.TaskMaxRetry(task))
-	data, err := g.baseApp.GfSpClient().GetObject(reqCtx.Context(), task)
-	if err != nil {
+	data, getObjectErr := g.baseApp.GfSpClient().GetObject(reqCtx.Context(), task)
+	if getObjectErr != nil {
+		err = getObjectErr
 		log.CtxErrorw(reqCtx.Context(), "failed to download object", "error", err)
 		return
 	}
@@ -569,6 +598,18 @@ func isPrivateObject(bucket *storagetypes.BucketInfo, object *storagetypes.Objec
 		bucket.GetVisibility() == storagetypes.VISIBILITY_TYPE_PRIVATE)
 }
 
+func checkIfRequestFromBrowser(userAgent string) bool {
+	// List of common user agent substrings for mainstream browsers
+	mainstreamBrowsers := []string{"Chrome", "Firefox", "Safari", "Opera", "Edge"}
+	// Check if the User-Agent header contains any of the mainstream browser substrings
+	for _, browser := range mainstreamBrowsers {
+		if strings.Contains(userAgent, browser) {
+			return true
+		}
+	}
+	return false
+}
+
 // downloadObjectByUniversalEndpointHandler handles the download object request sent by universal endpoint
 func (g *GateModular) downloadObjectByUniversalEndpointHandler(w http.ResponseWriter, r *http.Request) {
 	g.getObjectByUniversalEndpointHandler(w, r, true)

From 5229139cfab3059037d3441b1227be0793097578 Mon Sep 17 00:00:00 2001
From: joeycli
Date: Sun, 25 Jun 2023 19:06:03 +0800
Subject: [PATCH 27/78] chore: ignore push approval to queue error (#658)

---
 modular/approver/approve_task.go | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/modular/approver/approve_task.go b/modular/approver/approve_task.go
index e4c60ff77..0a88d768c 100644
---
 a/modular/approver/approve_task.go
+++ b/modular/approver/approve_task.go
@@ -84,12 +84,7 @@ func (a *ApprovalModular) HandleCreateBucketApprovalTask(ctx context.Context, ta
 	}
 	task.GetCreateBucketInfo().GetPrimarySpApproval().Sig = signature
 	startPushQueue := time.Now()
-	if err = a.bucketQueue.Push(task); err != nil {
-		metrics.GnfdChainHistogram.WithLabelValues("update_queue_in_create_bucket_approval").
-			Observe(time.Since(startPushQueue).Seconds())
-		log.CtxErrorw(ctx, "failed to push the create bucket approval to queue", "error", err)
-		return false, err
-	}
+	_ = a.bucketQueue.Push(task)
 	metrics.GnfdChainHistogram.WithLabelValues("update_queue_in_create_bucket_approval").
 		Observe(time.Since(startPushQueue).Seconds())
 	return true, nil
@@ -148,12 +143,7 @@ func (a *ApprovalModular) HandleCreateObjectApprovalTask(ctx context.Context, ta
 	}
 	task.GetCreateObjectInfo().GetPrimarySpApproval().Sig = signature
 	startPushQueue := time.Now()
-	if err = a.objectQueue.Push(task); err != nil {
-		metrics.GnfdChainHistogram.WithLabelValues("update_queue_in_create_object_approval").
-			Observe(time.Since(startPushQueue).Seconds())
-		log.CtxErrorw(ctx, "failed to push the create object task to queue", "error", err)
-		return false, err
-	}
+	_ = a.objectQueue.Push(task)
 	metrics.GnfdChainHistogram.WithLabelValues("update_queue_in_create_object_approval").
 		Observe(time.Since(startPushQueue).Seconds())
 	return true, nil

From c030ddfe6e96ce65457f48dc3c05888f3083d17a Mon Sep 17 00:00:00 2001
From: VM <112189277+sysvm@users.noreply.github.com>
Date: Mon, 26 Jun 2023 16:29:27 +0800
Subject: [PATCH 28/78] docs: fix sp readme (#617)

* docs: fix sp readme

* fix: rename approval to approver

* fix: rename approval to approver

---------

Co-authored-by: VM
---
 README.md                        | 25 +++++----
 core/module/module_const.go      |  2 +-
 deployment/localup/localup.sh    | 54 +------------------
 docs/readme.md                   |  2 +-
 .../config}/config_template.toml |  0
 test/e2e/spworkflow/e2e_test.sh  |  7 ++-
 6 files changed, 21 insertions(+), 69 deletions(-)
 rename docs/{spconfig => run-book/config}/config_template.toml (100%)

diff --git a/README.md b/README.md
index 404fcf02b..9a37dd7af 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,7 @@ Greenfield Storage Provider
 Version : vx.x.x
 Branch : master
 Commit : bfc32b9748c11d74493f93c420744ade4dbc18ac
-Build : go1.20.3 darwin arm64 2023-05-12 13:37
+Build : go1.20.3 darwin arm64 2023-06-20 13:37
 
 # show help
 ./gnfd-sp help
@@ -62,9 +62,13 @@
 
 #### Edit configuration
 
+If you want to view detailed config.toml, you can visit this [page](./docs/run-book/config/config_template.toml).
+ +The following lists some important configuration information: + ```toml Server = [] -GrpcAddress = '0.0.0.0:9333' +GRPCAddress = '0.0.0.0:9333' [SpDB] User = '${db_user}' @@ -100,7 +104,7 @@ ChainID = '${chain_id}' ChainAddress = ['${chain_address}'] [SpAccount] -SpOperateAddress = '${sp_operator_address}' +SpOperatorAddress = '${sp_operator_address}' OperatorPrivateKey = '${operator_private_key}' FundingPrivateKey = '${funding_private_key}' SealPrivateKey = '${seal_private_key}' @@ -116,11 +120,11 @@ MetadataEndpoint = 'metadata:9333' UploaderEndpoint = 'uploader:9333' P2PEndpoint = 'p2p:9333' SignerEndpoint = 'signer:9333' -AuthorizerEndpoint = 'localhost:9333' +AuthenticatorEndpoint = 'localhost:9333' [Gateway] -Domain = '${gateway_domain_name}' -HttpAddress = '0.0.0.0:9033' +DomainName = '${gateway_domain_name}' +HTTPAddress = '0.0.0.0:9033' [P2P] P2PPrivateKey = '${p2p_private_key}' @@ -136,8 +140,8 @@ DiscontinueBucketKeepAliveDays = 2 [Monitor] DisableMetrics = false DisablePProf = false -MetricsHttpAddress = '0.0.0.0:24367' -PProfHttpAddress = '0.0.0.0:24368' +MetricsHTTPAddress = '0.0.0.0:24367' +PProfHTTPAddress = '0.0.0.0:24368' [Rcmgr] DisableRcmgr = false @@ -171,9 +175,9 @@ RatePeriod = 'S' ./gnfd-sp --config ${config_file_path} ``` -### Add Greenfield Chain +### Join Greenfield Testnet -[Add SP to Greenfield](https://github.com/bnb-chain/greenfield-docs/blob/master/src/guide/storage-provider/run-book/run-testnet-SP-node.md) +[Run Testnet SP Node](https://docs.bnbchain.org/greenfield-docs/docs/guide/storage-provider/run-book/run-testnet-SP-node) ## Document @@ -191,6 +195,7 @@ RatePeriod = 'S' * [Greenfield](https://github.com/bnb-chain/greenfield): The Golang implementation of the Greenfield Blockchain. * [Greenfield-Go-SDK](https://github.com/bnb-chain/greenfield-go-sdk): The Greenfield SDK, interact with SP, Greenfield and Tendermint. +* [Greenfield Cmd](https://github.com/bnb-chain/greenfield-cmd): Greenfield client cmd tool, supporting commands to make requests to greenfield. * [Greenfield-Common](https://github.com/bnb-chain/greenfield-common): The Greenfield common package. * [Reed-Solomon](https://github.com/klauspost/reedsolomon): The Reed-Solomon Erasure package in prue Go, with speeds exceeding 1GB/s/cpu core. * [Juno](https://github.com/bnb-chain/juno): The Cosmos Hub blockchain data aggregator and exporter package. diff --git a/core/module/module_const.go b/core/module/module_const.go index 4264a2724..f44319e9a 100644 --- a/core/module/module_const.go +++ b/core/module/module_const.go @@ -5,7 +5,7 @@ import ( ) var ( - ApprovalModularName = strings.ToLower("Approval") + ApprovalModularName = strings.ToLower("Approver") ApprovalModularDescription = "Handles the ask crate bucket/object and replicates piece approval request." AuthenticationModularName = strings.ToLower("Authenticator") AuthenticationModularDescription = "Checks authentication." 
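The README hunks above rename several config keys (e.g. `SpOperateAddress` → `SpOperatorAddress`, `AuthorizerEndpoint` → `AuthenticatorEndpoint`, `Domain` → `DomainName`), so a config.toml written against the old key names will no longer populate those fields. The sketch below is illustrative only — it is not the storage provider's actual config loader, and it assumes the github.com/BurntSushi/toml package plus a made-up `spConfig` struct — but it shows how a TOML decoder reports a renamed key as undecoded, which is how this kind of breakage surfaces for operators.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// spConfig is a hypothetical subset of the SP config after the rename;
// the real struct lives in the storage provider's own config package.
type spConfig struct {
	GRPCAddress string
	SpAccount   struct {
		SpOperatorAddress string
	}
}

func main() {
	// A config fragment still using a pre-rename key name.
	oldConfig := `
GRPCAddress = '0.0.0.0:9333'

[SpAccount]
SpOperateAddress = '0x...'
`
	var cfg spConfig
	md, err := toml.Decode(oldConfig, &cfg)
	if err != nil {
		panic(err)
	}
	// SpOperateAddress no longer matches any field, so it shows up in the
	// decoder's undecoded set and SpOperatorAddress stays empty.
	fmt.Printf("operator=%q stale keys=%v\n",
		cfg.SpAccount.SpOperatorAddress, md.Undecoded())
}
```

Running a check like this — or simply diffing an existing config against docs/run-book/config/config_template.toml — is a cheap way to catch silently ignored keys when upgrading.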
diff --git a/deployment/localup/localup.sh b/deployment/localup/localup.sh index 802c8de57..1fd195ee9 100644 --- a/deployment/localup/localup.sh +++ b/deployment/localup/localup.sh @@ -151,55 +151,6 @@ function make_config() { done } -############################################################# -# make integration test config.toml according sp.json # -############################################################# -function make_integration_test_config() { - index=0 - sp_json_file=$1 - file='test/e2e/localup_env/integration_config/config.yml' - - validator_private_key=("$(echo "y" | $gnfd_bin keys export validator0 --unarmored-hex --unsafe --keyring-backend test --home ${gnfd_workspace}/.local/validator0)") - echo "validator0 private key validator_private_key" - sed -i -e "s/20f92afe113b90e1faa241969e957ac091d80b920f84ffda80fc9d0588f62906/${validator_private_key}/g" $file - - echo "SPs:" >> $file - sp0_opk=$(jq -r ".sp0.OperatorPrivateKey" ${sp_json_file}) - sp0_fpk=$(jq -r ".sp0.FundingPrivateKey" ${sp_json_file}) - sp0_spk=$(jq -r ".sp0.SealPrivateKey" ${sp_json_file}) - sp0_apk=$(jq -r ".sp0.ApprovalPrivateKey" ${sp_json_file}) - sp1_opk=$(jq -r ".sp1.OperatorPrivateKey" ${sp_json_file}) - sp1_fpk=$(jq -r ".sp1.FundingPrivateKey" ${sp_json_file}) - sp1_spk=$(jq -r ".sp1.SealPrivateKey" ${sp_json_file}) - sp1_apk=$(jq -r ".sp1.ApprovalPrivateKey" ${sp_json_file}) - - sp0_op_addr=$(jq -r ".sp0.OperatorAddress" ${sp_json_file}) - sp1_op_addr=$(jq -r ".sp1.OperatorAddress" ${sp_json_file}) - sp2_op_addr=$(jq -r ".sp2.OperatorAddress" ${sp_json_file}) - sp3_op_addr=$(jq -r ".sp3.OperatorAddress" ${sp_json_file}) - sp4_op_addr=$(jq -r ".sp4.OperatorAddress" ${sp_json_file}) - sp5_op_addr=$(jq -r ".sp5.OperatorAddress" ${sp_json_file}) - sp6_op_addr=$(jq -r ".sp6.OperatorAddress" ${sp_json_file}) - - echo " - OperatorSecret: "${sp0_opk}"" >> $file - echo " FundingSecret: "${sp0_fpk}"" >> $file - echo " ApprovalSecret: "${sp0_spk}"" >> $file - echo " SealSecret: "${sp0_apk}"" >> $file - echo " - OperatorSecret: "${sp1_opk}"" >> $file - echo " FundingSecret: "${sp1_fpk}"" >> $file - echo " ApprovalSecret: "${sp1_spk}"" >> $file - echo " SealSecret: "${sp1_apk}"" >> $file - echo "SPAddr:" >> $file - echo " - $sp0_op_addr" >> $file - echo " - $sp1_op_addr" >> $file - echo " - $sp2_op_addr" >> $file - echo " - $sp3_op_addr" >> $file - echo " - $sp4_op_addr" >> $file - echo " - $sp5_op_addr" >> $file - echo " - $sp6_op_addr" >> $file - cat $file -} - ############# # start sps # ############# @@ -312,13 +263,10 @@ function main() { --print) print_work_dir ;; - --gene2e) - make_integration_test_config $2 - ;; --help|*) display_help ;; esac } -main $@ \ No newline at end of file +main $@ diff --git a/docs/readme.md b/docs/readme.md index 708db47dd..d2afee6ce 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -25,4 +25,4 @@ This section explores the inner componnet of the Greenfield Storage Provider imp - Workflow - [SP Workflow](./workflow/workflow.md) - Config - - [Config](./spconfig/config_template.toml) + - [Config](./run-book/config/config_template.toml) diff --git a/docs/spconfig/config_template.toml b/docs/run-book/config/config_template.toml similarity index 100% rename from docs/spconfig/config_template.toml rename to docs/run-book/config/config_template.toml diff --git a/test/e2e/spworkflow/e2e_test.sh b/test/e2e/spworkflow/e2e_test.sh index de182c0e7..c2f3329a0 100644 --- a/test/e2e/spworkflow/e2e_test.sh +++ b/test/e2e/spworkflow/e2e_test.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -#basedir=$(cd 
`dirname $0` || return; pwd) workspace=${GITHUB_WORKSPACE} # some constants @@ -11,7 +10,7 @@ MYSQL_PASSWORD="root" MYSQL_ADDRESS="127.0.0.1:3306" TEST_ACCOUNT_ADDRESS="0x76263999b87D08228eFB098F36d17363Acf40c2c" TEST_ACCOUNT_PRIVATE_KEY="da942d31bc4034577f581057e4a3644404ac12828a84052f87086d508fdcf095" -BUCKET_NAME="spe2etestbucket" +BUCKET_NAME="spbucket" ######################################### # build and start Greenfield blockchain # @@ -108,7 +107,7 @@ function test_file_size_less_than_16_mb() { cd ${workspace}/greenfield-cmd/build/ ./gnfd-cmd -c ./config.toml --home ./ object put --contentType "application/json" ${workspace}/test/e2e/spworkflow/testdata/example.json gnfd://${BUCKET_NAME} sleep 16 - ./gnfd-cmd -c ./config.toml --home ./ object get gnfd://spe2etestbucket/example.json ./test_data.json + ./gnfd-cmd -c ./config.toml --home ./ object get gnfd://${BUCKET_NAME}/example.json ./test_data.json check_md5 ${workspace}/test/e2e/spworkflow/testdata/example.json ./test_data.json cat test_data.json } @@ -122,7 +121,7 @@ function test_file_size_greater_than_16_mb() { dd if=/dev/urandom of=./random_file bs=17M count=1 ./gnfd-cmd -c ./config.toml --home ./ object put --contentType "application/octet-stream" ./random_file gnfd://${BUCKET_NAME}/random_file sleep 16 - ./gnfd-cmd -c ./config.toml --home ./ object get gnfd://spe2etestbucket/random_file ./new_random_file + ./gnfd-cmd -c ./config.toml --home ./ object get gnfd://${BUCKET_NAME}/random_file ./new_random_file sleep 10 check_md5 ./random_file ./new_random_file } From 9e151a50b18b5ce5d4d418071ec3bdfa535f54d6 Mon Sep 17 00:00:00 2001 From: VM <112189277+sysvm@users.noreply.github.com> Date: Mon, 26 Jun 2023 16:30:00 +0800 Subject: [PATCH 29/78] docs: sp docs add flowchart (#649) * docs: sp docs add flowchart * docs: update three flowcharts --------- Co-authored-by: VM --- docs/asset/01-sp_arch.jpg | Bin 227494 -> 290849 bytes docs/asset/02-get_approval.jpg | Bin 0 -> 114662 bytes docs/asset/03-put_object.jpg | Bin 0 -> 143693 bytes docs/asset/04-replicate_object.jpg | Bin 0 -> 418674 bytes docs/asset/05-get_object.jpg | Bin 0 -> 82501 bytes docs/asset/06-query_meta.jpg | Bin 0 -> 83546 bytes docs/asset/07-challenge.jpg | Bin 0 -> 76627 bytes docs/asset/08-gc_object.jpg | Bin 0 -> 82925 bytes ...estore_arch.jpg => 09-piecestore_arch.jpg} | Bin docs/asset/{03-ec.jpg => 10-ec.jpg} | Bin docs/introduction/overview.md | 2 +- docs/store/piece_store.md | 2 +- docs/store/redundancy.md | 2 +- docs/workflow/workflow.md | 25 ++++++++++++++++-- 14 files changed, 26 insertions(+), 5 deletions(-) create mode 100644 docs/asset/02-get_approval.jpg create mode 100644 docs/asset/03-put_object.jpg create mode 100644 docs/asset/04-replicate_object.jpg create mode 100644 docs/asset/05-get_object.jpg create mode 100644 docs/asset/06-query_meta.jpg create mode 100644 docs/asset/07-challenge.jpg create mode 100644 docs/asset/08-gc_object.jpg rename docs/asset/{02-piecestore_arch.jpg => 09-piecestore_arch.jpg} (100%) rename docs/asset/{03-ec.jpg => 10-ec.jpg} (100%) diff --git a/docs/asset/01-sp_arch.jpg b/docs/asset/01-sp_arch.jpg index 8e1e5c74871802de961c068bbcdecd35f5ce7658..7dbdb11bdeb1fbf5653f7c53d294ccfdb7ef6dbf 100644 GIT binary patch literal 290849 zcmeFac|26_`#*kUC&?btREV-AONC6EJ!{!xLb7Ei%47=Jmk^4W6xmH=smRz1l`L6f z#vl@zLE|tpXTGP``~7*pKcCO*^LsqLkKaGv-}mwB=FHsAoO7T1y6)?`?&o#g*FBhH zOd_z`Sl>t=U||7(`|uCIoB=}g!o98mfT<~P1ONaofQ>~0V25k)C4erAAJZtbso{_(!!$&UZHf}`aL>%Z#oz3@|4fD;y;{vrOsp8f&* 
[... base85-encoded binary patch data for docs/asset/01-sp_arch.jpg omitted ...]
zvi}ctZypYH{PvG3Axqi!G8H01$&$#VvLy-GW3nadB-^o!ZS+egj z_FZNyV=_zU-S?b7e&>0f>pJKB_d9=dxtfca`Mj5VdEKx3b>F4Lq8*VD$w;n)LPPIM zV~1-AdSlD)-4K2sjqDt+-M6w!5O<;P77f8Zef5g)_?7*96 z3DBHP$v@*@LpYHbn?}$XPDe8&4}S~VaPFCR;bq!*@_oLBlyPEDkc29g4?wnvLE7$6wG4j56=P6SGqHxU9 z0c4fCp1MyVQruQw>ayQ>S3nHGF_3s6w3m3J{QF6Wb1&_vYJ>1j*?t*=f zTZIpbN(yUwm8t2Y$oVi>CF)%>=Z{5HP)3zc`#7=5)lHgb^Dm8QVx`VZ_k>TPXql8m zhj4U5PKxW-l{nm!feEEt@!?R}nB9T$-zK9^g!zqAB9nUhH)aV?UiAF5>*Y3Z#sX1( z+Jlujd&Mq|q9qt6ubFeNivz!tX}BrBO>mq*_W*MSth1hqr;1SnfV;#@Nzch{Buzq6 z#;U_P2XBuU$zxUUE+Bcq7?mLBQ(<(T#WD+i3ZUk|q2jc!p3~vA-(l;@L_H>Gbz!Ib z!a1%p{nl1a z07wXk#UrgMEY*dFy?m-iDt$}XWGxzLSvs%2yAY_wcCLVhrmG!29|6NXflLR&rb+Ec z+(XuxWHVJ!Jbj#d!Q}dJN9Tr&E;VLYFxBz-1^%oTSxKh3aekQ6{O2s0x5lsMYsg<% z>aQUSbuqVyG4^~Mt@FX|S`KX8jrhkEnQsK;RysDyyMQY%gf8PUcFr&9YPKTTSH`5T z7E2v69!$wiNr{A~V<`egtZX{(_ddDaJjLGfJfCE+4#8RM0)7ilPwy0U@`!p3FP)at zaW>B08e~Vz+MZrlDr3tsi0Mv>JbR4a;(djqr=DuRQ~cytdRj?OcID&3_UzI+oTV`3 zTE_p0%=>fy@dB3dIi7b%Y7q<_hqh7Rgt7#~)Q&yTO}YBv`b`6~E-wl5s;-n%QLAjV zK7Y~_DtJW7DC(%YAOFnfxsDCU7DFAB?jBW?{->M?ON3Sokn=X%s7-5tOG<09Srzbv zIso0vC}cf8%J@t3@QgXKPgR)O)Vn7UOZo{RyORgDUPp+d`Uge;fA_x&eV_rod%h96 z7wZb6w%vkJqft{2$RHpJ1VK>G?J6XRWdnJv(4>+|ISwTzulm+?k)q)^boZY;0$^w- z=mEnRc!63)o&=|=8)RURDO*fNMROtPC0TV4dmm4k2rJeo^rCAoKAR=eTNGky_dNQk z0-uUCmcoaW#WLZ@s~OF#9NI$ggSiPZEcMvX%pwE z8-PfD8l3l2^_2HemyAgdBVsNnfziuwI&UqeQsm)6)Z?k7amhC(_fJkQit;(0`%UX} z`?X0GXz#?qsf8rfNj(xMYhgqhbxIIo9qH#nbn5(|JlV?fqxvUTTuK;k_|~5Cz0nx1 z;QiYw`XH7p076vpUt{QPpHYDJB=w+kBoTx-A#*-9_`?mGwcg8Pgx4q? zNDq<>JU^AWZi#go)n$w=m)VJyZy^5M6LY0)zkbQ$^0y)B)#}>tlbton_f+5l1zINz z3Kkw_j0GbJF;RjzY% zEqr{mBnz-twV+H_2`7r7wsWoVf|wx-CsieUwG{cfwO0b4C~dEo#7X|F8sn$mg*{^; zKk&ufJZBpvq0vATT6xaLE%7lgG$xT}7G0Q8>S?lEW6ij25nqPsa&^fk^P^{v?X?x)AkH~S_|PPGYw3WB+yEmkea-mGO#yVl%`_j zbIA05*R9YFRM6ZdlKf#hs#q@hby{_@yKn~~Y*jS(hDG_c6naPQR{N8@^z}l@N!Qb~ zw}VckqbvVP4hA#ycxL1GT_85#fHnFnI