diff --git a/scheduler/resource/persistentcache/host.go b/scheduler/resource/persistentcache/host.go index 3daf7868736..ab0adacbe40 100644 --- a/scheduler/resource/persistentcache/host.go +++ b/scheduler/resource/persistentcache/host.go @@ -212,6 +212,9 @@ type Build struct { // Golang version. GoVersion string `csv:"goVersion"` + // Rust version. + RustVersion string `csv:"rustVersion"` + // Build platform. Platform string `csv:"platform"` } diff --git a/scheduler/resource/persistentcache/peer.go b/scheduler/resource/persistentcache/peer.go index c9e1850c551..67b0edca15e 100644 --- a/scheduler/resource/persistentcache/peer.go +++ b/scheduler/resource/persistentcache/peer.go @@ -62,6 +62,9 @@ type Peer struct { // ID is persistent cache peer id. ID string + // Persistent is whether the peer is persistent. + Persistent bool + // Pieces is finished pieces bitset. FinishedPieces *bitset.BitSet @@ -91,7 +94,8 @@ type Peer struct { } // New persistent cache peer instance. -func NewPeer(id, state string, finishedPieces *bitset.BitSet, blockParents []string, task *Task, host *Host, +func NewPeer(id, state string, persistent bool, finishedPieces *bitset.BitSet, blockParents []string, task *Task, host *Host, cost time.Duration, createdAt, updatedAt time.Time, log *logger.SugaredLoggerOnWith) *Peer { p := &Peer{ ID: id, + Persistent: persistent, diff --git a/scheduler/resource/persistentcache/peer_manager.go b/scheduler/resource/persistentcache/peer_manager.go index a93f21655c0..510c9ff2a59 100644 --- a/scheduler/resource/persistentcache/peer_manager.go +++ b/scheduler/resource/persistentcache/peer_manager.go @@ -77,6 +77,12 @@ func (p *peerManager) Load(ctx context.Context, peerID string) (*Peer, bool) { return nil, false } + persistent, err := strconv.ParseBool(rawPeer["persistent"]) + if err != nil { + log.Errorf("parsing persistent failed: %v", err) + return nil, false + } + finishedPieces := &bitset.BitSet{} if err := finishedPieces.UnmarshalBinary([]byte(rawPeer["finished_pieces"])); err != nil
{ log.Errorf("unmarshal finished pieces failed: %v", err) @@ -123,6 +129,7 @@ func (p *peerManager) Load(ctx context.Context, peerID string) (*Peer, bool) { return NewPeer( rawPeer["id"], rawPeer["state"], + persistent, finishedPieces, blockParents, task, @@ -153,6 +160,7 @@ func (p *peerManager) Store(ctx context.Context, peer *Peer) error { pipe.HSet(ctx, pkgredis.MakePersistentCachePeerKeyInScheduler(p.config.Manager.SchedulerClusterID, peer.ID), "id", peer.ID, + "persistent", peer.Persistent, "finished_pieces", finishedPieces, "state", peer.FSM.Current(), "block_parents", blockParents, diff --git a/scheduler/rpcserver/rpcserver.go b/scheduler/rpcserver/rpcserver.go index bf10b83acea..c2f1a39363c 100644 --- a/scheduler/rpcserver/rpcserver.go +++ b/scheduler/rpcserver/rpcserver.go @@ -21,7 +21,8 @@ import ( "d7y.io/dragonfly/v2/pkg/rpc/scheduler/server" "d7y.io/dragonfly/v2/scheduler/config" - resource "d7y.io/dragonfly/v2/scheduler/resource/standard" + "d7y.io/dragonfly/v2/scheduler/resource/persistentcache" + "d7y.io/dragonfly/v2/scheduler/resource/standard" "d7y.io/dragonfly/v2/scheduler/scheduling" "d7y.io/dragonfly/v2/scheduler/storage" ) @@ -29,7 +30,8 @@ import ( // New returns a new scheduler server from the given options. func New( cfg *config.Config, - resource resource.Resource, + resource standard.Resource, + persistentCacheResource persistentcache.Resource, scheduling scheduling.Scheduling, dynconfig config.DynconfigInterface, storage storage.Storage, @@ -37,6 +39,6 @@ func New( ) *grpc.Server { return server.New( newSchedulerServerV1(cfg, resource, scheduling, dynconfig, storage), - newSchedulerServerV2(cfg, resource, scheduling, dynconfig, storage), + newSchedulerServerV2(cfg, resource, persistentCacheResource, scheduling, dynconfig, storage), opts...) 
} diff --git a/scheduler/rpcserver/rpcserver_test.go b/scheduler/rpcserver/rpcserver_test.go index eb016e443f0..2ae87d2080a 100644 --- a/scheduler/rpcserver/rpcserver_test.go +++ b/scheduler/rpcserver/rpcserver_test.go @@ -26,7 +26,8 @@ import ( "d7y.io/dragonfly/v2/scheduler/config" configmocks "d7y.io/dragonfly/v2/scheduler/config/mocks" - resource "d7y.io/dragonfly/v2/scheduler/resource/standard" + "d7y.io/dragonfly/v2/scheduler/resource/persistentcache" + "d7y.io/dragonfly/v2/scheduler/resource/standard" "d7y.io/dragonfly/v2/scheduler/scheduling/mocks" storagemocks "d7y.io/dragonfly/v2/scheduler/storage/mocks" ) @@ -59,11 +60,12 @@ func TestRPCServer_New(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := mocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - svr := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + svr := New(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) tc.expect(t, svr) }) } diff --git a/scheduler/rpcserver/scheduler_server_v2.go b/scheduler/rpcserver/scheduler_server_v2.go index 41a24352589..5d63bb493d2 100644 --- a/scheduler/rpcserver/scheduler_server_v2.go +++ b/scheduler/rpcserver/scheduler_server_v2.go @@ -26,13 +26,13 @@ import ( "d7y.io/dragonfly/v2/scheduler/config" "d7y.io/dragonfly/v2/scheduler/metrics" - resource "d7y.io/dragonfly/v2/scheduler/resource/standard" + "d7y.io/dragonfly/v2/scheduler/resource/persistentcache" + "d7y.io/dragonfly/v2/scheduler/resource/standard" "d7y.io/dragonfly/v2/scheduler/scheduling" "d7y.io/dragonfly/v2/scheduler/service" "d7y.io/dragonfly/v2/scheduler/storage" ) -// TODO Implement v2 version of the rpc server apis. 
// schedulerServerV2 is v2 version of the scheduler grpc server. type schedulerServerV2 struct { // Service interface. @@ -42,12 +42,13 @@ type schedulerServerV2 struct { // newSchedulerServerV2 returns v2 version of the scheduler server. func newSchedulerServerV2( cfg *config.Config, - resource resource.Resource, + resource standard.Resource, + persistentCacheResource persistentcache.Resource, scheduling scheduling.Scheduling, dynconfig config.DynconfigInterface, storage storage.Storage, ) schedulerv2.SchedulerServer { - return &schedulerServerV2{service.NewV2(cfg, resource, scheduling, dynconfig, storage)} + return &schedulerServerV2{service.NewV2(cfg, resource, persistentCacheResource, scheduling, dynconfig, storage)} } // AnnouncePeer announces peer to scheduler. diff --git a/scheduler/scheduler.go b/scheduler/scheduler.go index 4f0ea69fb22..99f4437242b 100644 --- a/scheduler/scheduler.go +++ b/scheduler/scheduler.go @@ -42,8 +42,8 @@ import ( "d7y.io/dragonfly/v2/scheduler/config" "d7y.io/dragonfly/v2/scheduler/job" "d7y.io/dragonfly/v2/scheduler/metrics" - persistentcache "d7y.io/dragonfly/v2/scheduler/resource/persistentcache" - standard "d7y.io/dragonfly/v2/scheduler/resource/standard" + "d7y.io/dragonfly/v2/scheduler/resource/persistentcache" + "d7y.io/dragonfly/v2/scheduler/resource/standard" "d7y.io/dragonfly/v2/scheduler/rpcserver" "d7y.io/dragonfly/v2/scheduler/scheduling" "d7y.io/dragonfly/v2/scheduler/storage" @@ -212,7 +212,7 @@ func New(ctx context.Context, cfg *config.Config, d dfpath.Dfpath) (*Server, err schedulerServerOptions = append(schedulerServerOptions, grpc.Creds(rpc.NewInsecureCredentials())) } - svr := rpcserver.New(cfg, resource, scheduling, dynconfig, s.storage, schedulerServerOptions...) + svr := rpcserver.New(cfg, resource, s.persistentCacheResource, scheduling, dynconfig, s.storage, schedulerServerOptions...) s.grpcServer = svr // Initialize metrics. 
diff --git a/scheduler/service/service_v1.go b/scheduler/service/service_v1.go index 5d5317180d7..0e51ff93672 100644 --- a/scheduler/service/service_v1.go +++ b/scheduler/service/service_v1.go @@ -807,7 +807,7 @@ func (v *V1) triggerSeedPeerTask(ctx context.Context, rg *http.Range, task *reso } // storeTask stores a new task or reuses a previous task. -func (v *V1) storeTask(ctx context.Context, req *schedulerv1.PeerTaskRequest, typ commonv2.TaskType) *resource.Task { +func (v *V1) storeTask(_ context.Context, req *schedulerv1.PeerTaskRequest, typ commonv2.TaskType) *resource.Task { filteredQueryParams := strings.Split(req.UrlMeta.GetFilter(), idgen.FilteredQueryParamsSeparator) task, loaded := v.resource.TaskManager().Load(req.GetTaskId()) @@ -834,7 +834,7 @@ func (v *V1) storeTask(ctx context.Context, req *schedulerv1.PeerTaskRequest, ty } // storeHost stores a new host or reuses a previous host. -func (v *V1) storeHost(ctx context.Context, peerHost *schedulerv1.PeerHost) *resource.Host { +func (v *V1) storeHost(_ context.Context, peerHost *schedulerv1.PeerHost) *resource.Host { host, loaded := v.resource.HostManager().Load(peerHost.Id) if !loaded { options := []resource.HostOption{resource.WithNetwork(resource.Network{ @@ -866,7 +866,7 @@ func (v *V1) storeHost(ctx context.Context, peerHost *schedulerv1.PeerHost) *res } // storePeer stores a new peer or reuses a previous peer. 
-func (v *V1) storePeer(ctx context.Context, id string, priority commonv1.Priority, rg string, task *resource.Task, host *resource.Host) *resource.Peer { +func (v *V1) storePeer(_ context.Context, id string, priority commonv1.Priority, rg string, task *resource.Task, host *resource.Host) *resource.Peer { peer, loaded := v.resource.PeerManager().Load(id) if !loaded { options := []resource.PeerOption{} @@ -1057,7 +1057,7 @@ func (v *V1) handleBeginOfPiece(ctx context.Context, peer *resource.Peer) { func (v *V1) handleEndOfPiece(ctx context.Context, peer *resource.Peer) {} // handlePieceSuccess handles successful piece. -func (v *V1) handlePieceSuccess(ctx context.Context, peer *resource.Peer, pieceResult *schedulerv1.PieceResult) { +func (v *V1) handlePieceSuccess(_ context.Context, peer *resource.Peer, pieceResult *schedulerv1.PieceResult) { // Distinguish traffic type. trafficType := commonv2.TrafficType_REMOTE_PEER if resource.IsPieceBackToSource(pieceResult.DstPid) { diff --git a/scheduler/service/service_v2.go b/scheduler/service/service_v2.go index d70a1314856..1162a20b3f2 100644 --- a/scheduler/service/service_v2.go +++ b/scheduler/service/service_v2.go @@ -39,7 +39,8 @@ import ( "d7y.io/dragonfly/v2/pkg/types" "d7y.io/dragonfly/v2/scheduler/config" "d7y.io/dragonfly/v2/scheduler/metrics" - resource "d7y.io/dragonfly/v2/scheduler/resource/standard" + "d7y.io/dragonfly/v2/scheduler/resource/persistentcache" + "d7y.io/dragonfly/v2/scheduler/resource/standard" "d7y.io/dragonfly/v2/scheduler/scheduling" "d7y.io/dragonfly/v2/scheduler/storage" ) @@ -47,7 +48,10 @@ import ( // V2 is the interface for v2 version of the service. type V2 struct { // Resource interface. - resource resource.Resource + resource standard.Resource + + // Persistent cache resource interface. + persistentCacheResource persistentcache.Resource // Scheduling interface. scheduling scheduling.Scheduling @@ -65,17 +69,19 @@ type V2 struct { // New v2 version of service instance. 
func NewV2( cfg *config.Config, - resource resource.Resource, + resource standard.Resource, + persistentCacheResource persistentcache.Resource, scheduling scheduling.Scheduling, dynconfig config.DynconfigInterface, storage storage.Storage, ) *V2 { return &V2{ - resource: resource, - scheduling: scheduling, - config: cfg, - dynconfig: dynconfig, - storage: storage, + resource: resource, + persistentCacheResource: persistentCacheResource, + scheduling: scheduling, + config: cfg, + dynconfig: dynconfig, + storage: storage, } } @@ -227,7 +233,7 @@ func (v *V2) StatPeer(ctx context.Context, req *schedulerv2.StatPeerRequest) (*c // Set pieces to response. peer.Pieces.Range(func(key, value any) bool { - piece, ok := value.(*resource.Piece) + piece, ok := value.(*standard.Piece) if !ok { peer.Log.Errorf("invalid piece %s %#v", key, value) return true @@ -278,7 +284,7 @@ func (v *V2) StatPeer(ctx context.Context, req *schedulerv2.StatPeerRequest) (*c // Set pieces to task response. peer.Task.Pieces.Range(func(key, value any) bool { - piece, ok := value.(*resource.Piece) + piece, ok := value.(*standard.Piece) if !ok { peer.Task.Log.Errorf("invalid piece %s %#v", key, value) return true @@ -384,7 +390,7 @@ func (v *V2) DeletePeer(ctx context.Context, req *schedulerv2.DeletePeerRequest) return status.Error(codes.NotFound, msg) } - if err := peer.FSM.Event(ctx, resource.PeerEventLeave); err != nil { + if err := peer.FSM.Event(ctx, standard.PeerEventLeave); err != nil { msg := fmt.Sprintf("peer fsm event failed: %s", err.Error()) peer.Log.Error(msg) return status.Error(codes.FailedPrecondition, msg) @@ -431,7 +437,7 @@ func (v *V2) StatTask(ctx context.Context, req *schedulerv2.StatTaskRequest) (*c // Set pieces to response. 
task.Pieces.Range(func(key, value any) bool { - piece, ok := value.(*resource.Piece) + piece, ok := value.(*standard.Piece) if !ok { task.Log.Errorf("invalid piece %s %#v", key, value) return true @@ -471,14 +477,14 @@ func (v *V2) DeleteTask(ctx context.Context, req *schedulerv2.DeleteTaskRequest) } host.Peers.Range(func(key, value any) bool { - peer, ok := value.(*resource.Peer) + peer, ok := value.(*standard.Peer) if !ok { host.Log.Errorf("invalid peer %s %#v", key, value) return true } if peer.Task.ID == req.GetTaskId() { - if err := peer.FSM.Event(ctx, resource.PeerEventLeave); err != nil { + if err := peer.FSM.Event(ctx, standard.PeerEventLeave); err != nil { msg := fmt.Sprintf("peer fsm event failed: %s", err.Error()) peer.Log.Error(msg) return true @@ -510,26 +516,26 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ host, loaded := v.resource.HostManager().Load(req.Host.GetId()) if !loaded { - options := []resource.HostOption{ - resource.WithDisableShared(req.Host.GetDisableShared()), - resource.WithOS(req.Host.GetOs()), - resource.WithPlatform(req.Host.GetPlatform()), - resource.WithPlatformFamily(req.Host.GetPlatformFamily()), - resource.WithPlatformVersion(req.Host.GetPlatformVersion()), - resource.WithKernelVersion(req.Host.GetKernelVersion()), + options := []standard.HostOption{ + standard.WithDisableShared(req.Host.GetDisableShared()), + standard.WithOS(req.Host.GetOs()), + standard.WithPlatform(req.Host.GetPlatform()), + standard.WithPlatformFamily(req.Host.GetPlatformFamily()), + standard.WithPlatformVersion(req.Host.GetPlatformVersion()), + standard.WithKernelVersion(req.Host.GetKernelVersion()), } if concurrentUploadLimit > 0 { - options = append(options, resource.WithConcurrentUploadLimit(concurrentUploadLimit)) + options = append(options, standard.WithConcurrentUploadLimit(concurrentUploadLimit)) } if req.Host.GetCpu() != nil { - options = append(options, resource.WithCPU(resource.CPU{ + options = 
append(options, standard.WithCPU(standard.CPU{ LogicalCount: req.Host.Cpu.GetLogicalCount(), PhysicalCount: req.Host.Cpu.GetPhysicalCount(), Percent: req.Host.Cpu.GetPercent(), ProcessPercent: req.Host.Cpu.GetProcessPercent(), - Times: resource.CPUTimes{ + Times: standard.CPUTimes{ User: req.Host.Cpu.Times.GetUser(), System: req.Host.Cpu.Times.GetSystem(), Idle: req.Host.Cpu.Times.GetIdle(), @@ -545,7 +551,7 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ } if req.Host.GetMemory() != nil { - options = append(options, resource.WithMemory(resource.Memory{ + options = append(options, standard.WithMemory(standard.Memory{ Total: req.Host.Memory.GetTotal(), Available: req.Host.Memory.GetAvailable(), Used: req.Host.Memory.GetUsed(), @@ -556,7 +562,7 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ } if req.Host.GetNetwork() != nil { - options = append(options, resource.WithNetwork(resource.Network{ + options = append(options, standard.WithNetwork(standard.Network{ TCPConnectionCount: req.Host.Network.GetTcpConnectionCount(), UploadTCPConnectionCount: req.Host.Network.GetUploadTcpConnectionCount(), Location: req.Host.Network.GetLocation(), @@ -569,7 +575,7 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ } if req.Host.GetDisk() != nil { - options = append(options, resource.WithDisk(resource.Disk{ + options = append(options, standard.WithDisk(standard.Disk{ Total: req.Host.Disk.GetTotal(), Free: req.Host.Disk.GetFree(), Used: req.Host.Disk.GetUsed(), @@ -582,7 +588,7 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ } if req.Host.GetBuild() != nil { - options = append(options, resource.WithBuild(resource.Build{ + options = append(options, standard.WithBuild(standard.Build{ GitVersion: req.Host.Build.GetGitVersion(), GitCommit: req.Host.Build.GetGitCommit(), GoVersion: req.Host.Build.GetGoVersion(), @@ -591,14 +597,14 @@ func (v *V2) 
AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ } if req.Host.GetSchedulerClusterId() != 0 { - options = append(options, resource.WithSchedulerClusterID(uint64(v.config.Manager.SchedulerClusterID))) + options = append(options, standard.WithSchedulerClusterID(uint64(v.config.Manager.SchedulerClusterID))) } if req.GetInterval() != nil { - options = append(options, resource.WithAnnounceInterval(req.GetInterval().AsDuration())) + options = append(options, standard.WithAnnounceInterval(req.GetInterval().AsDuration())) } - host = resource.NewHost( + host = standard.NewHost( req.Host.GetId(), req.Host.GetIp(), req.Host.GetHostname(), req.Host.GetPort(), req.Host.GetDownloadPort(), types.HostType(req.Host.GetType()), options..., @@ -626,12 +632,12 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ } if req.Host.GetCpu() != nil { - host.CPU = resource.CPU{ + host.CPU = standard.CPU{ LogicalCount: req.Host.Cpu.GetLogicalCount(), PhysicalCount: req.Host.Cpu.GetPhysicalCount(), Percent: req.Host.Cpu.GetPercent(), ProcessPercent: req.Host.Cpu.GetProcessPercent(), - Times: resource.CPUTimes{ + Times: standard.CPUTimes{ User: req.Host.Cpu.Times.GetUser(), System: req.Host.Cpu.Times.GetSystem(), Idle: req.Host.Cpu.Times.GetIdle(), @@ -647,7 +653,7 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ } if req.Host.GetMemory() != nil { - host.Memory = resource.Memory{ + host.Memory = standard.Memory{ Total: req.Host.Memory.GetTotal(), Available: req.Host.Memory.GetAvailable(), Used: req.Host.Memory.GetUsed(), @@ -658,7 +664,7 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ } if req.Host.GetNetwork() != nil { - host.Network = resource.Network{ + host.Network = standard.Network{ TCPConnectionCount: req.Host.Network.GetTcpConnectionCount(), UploadTCPConnectionCount: req.Host.Network.GetUploadTcpConnectionCount(), Location: req.Host.Network.GetLocation(), @@ -671,7 
+677,7 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ } if req.Host.GetDisk() != nil { - host.Disk = resource.Disk{ + host.Disk = standard.Disk{ Total: req.Host.Disk.GetTotal(), Free: req.Host.Disk.GetFree(), Used: req.Host.Disk.GetUsed(), @@ -684,7 +690,7 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ } if req.Host.GetBuild() != nil { - host.Build = resource.Build{ + host.Build = standard.Build{ GitVersion: req.Host.Build.GetGitVersion(), GitCommit: req.Host.Build.GetGitCommit(), GoVersion: req.Host.Build.GetGoVersion(), @@ -703,7 +709,7 @@ func (v *V2) AnnounceHost(ctx context.Context, req *schedulerv2.AnnounceHostRequ func (v *V2) ListHosts(ctx context.Context) (*schedulerv2.ListHostsResponse, error) { hosts := []*commonv2.Host{} v.resource.HostManager().Range(func(_ any, value any) bool { - host, ok := value.(*resource.Host) + host, ok := value.(*standard.Host) if !ok { // Continue to next host. logger.Warnf("invalid host %#v", value) @@ -823,10 +829,10 @@ func (v *V2) handleRegisterPeerRequest(ctx context.Context, stream schedulerv2.S peer.NeedBackToSource.Store(true) // If task is pending, failed, leave, or succeeded and has no available peer, // scheduler trigger seed peer download back-to-source. - case task.FSM.Is(resource.TaskStatePending) || - task.FSM.Is(resource.TaskStateFailed) || - task.FSM.Is(resource.TaskStateLeave) || - task.FSM.Is(resource.TaskStateSucceeded) && + case task.FSM.Is(standard.TaskStatePending) || + task.FSM.Is(standard.TaskStateFailed) || + task.FSM.Is(standard.TaskStateLeave) || + task.FSM.Is(standard.TaskStateSucceeded) && !task.HasAvailablePeer(blocklist): // If HostType is normal, trigger seed peer download back-to-source. @@ -853,8 +859,8 @@ func (v *V2) handleRegisterPeerRequest(ctx context.Context, stream schedulerv2.S } // Handle task with peer register request. 
- if !peer.Task.FSM.Is(resource.TaskStateRunning) { - if err := peer.Task.FSM.Event(ctx, resource.TaskEventDownload); err != nil { + if !peer.Task.FSM.Is(standard.TaskStateRunning) { + if err := peer.Task.FSM.Event(ctx, standard.TaskEventDownload); err != nil { // Collect RegisterPeerFailureCount metrics. metrics.RegisterPeerFailureCount.WithLabelValues(priority.String(), peer.Task.Type.String(), peer.Host.Type.Name()).Inc() @@ -875,7 +881,7 @@ func (v *V2) handleRegisterPeerRequest(ctx context.Context, stream schedulerv2.S return status.Error(codes.NotFound, "AnnouncePeerStream not found") } - if err := peer.FSM.Event(ctx, resource.PeerEventRegisterEmpty); err != nil { + if err := peer.FSM.Event(ctx, standard.PeerEventRegisterEmpty); err != nil { return status.Errorf(codes.Internal, err.Error()) } @@ -891,7 +897,7 @@ func (v *V2) handleRegisterPeerRequest(ctx context.Context, stream schedulerv2.S return nil case commonv2.SizeScope_NORMAL, commonv2.SizeScope_TINY, commonv2.SizeScope_SMALL, commonv2.SizeScope_UNKNOW: peer.Log.Info("scheduling as SizeScope_NORMAL") - if err := peer.FSM.Event(ctx, resource.PeerEventRegisterNormal); err != nil { + if err := peer.FSM.Event(ctx, standard.PeerEventRegisterNormal); err != nil { return status.Error(codes.Internal, err.Error()) } @@ -928,8 +934,8 @@ func (v *V2) handleDownloadPeerStartedRequest(ctx context.Context, peerID string peer.Host.Type.Name()).Inc() // Handle peer with peer started request. - if !peer.FSM.Is(resource.PeerStateRunning) { - if err := peer.FSM.Event(ctx, resource.PeerEventDownload); err != nil { + if !peer.FSM.Is(standard.PeerStateRunning) { + if err := peer.FSM.Event(ctx, standard.PeerEventDownload); err != nil { // Collect DownloadPeerStartedFailureCount metrics. 
metrics.DownloadPeerStartedFailureCount.WithLabelValues(priority.String(), peer.Task.Type.String(), peer.Host.Type.Name()).Inc() @@ -953,8 +959,8 @@ func (v *V2) handleDownloadPeerBackToSourceStartedRequest(ctx context.Context, p peer.Host.Type.Name()).Inc() // Handle peer with peer back-to-source started request. - if !peer.FSM.Is(resource.PeerStateRunning) { - if err := peer.FSM.Event(ctx, resource.PeerEventDownloadBackToSource); err != nil { + if !peer.FSM.Is(standard.PeerStateRunning) { + if err := peer.FSM.Event(ctx, standard.PeerEventDownloadBackToSource); err != nil { // Collect DownloadPeerBackToSourceStartedFailureCount metrics. metrics.DownloadPeerBackToSourceStartedFailureCount.WithLabelValues(priority.String(), peer.Task.Type.String(), peer.Host.Type.Name()).Inc() @@ -966,7 +972,7 @@ func (v *V2) handleDownloadPeerBackToSourceStartedRequest(ctx context.Context, p } // handleRescheduleRequest handles RescheduleRequest of AnnouncePeerRequest. -func (v *V2) handleRescheduleRequest(ctx context.Context, peerID string, candidateParents []*commonv2.Peer) error { +func (v *V2) handleRescheduleRequest(_ context.Context, peerID string, candidateParents []*commonv2.Peer) error { peer, loaded := v.resource.PeerManager().Load(peerID) if !loaded { return status.Errorf(codes.NotFound, "peer %s not found", peerID) @@ -997,7 +1003,7 @@ func (v *V2) handleDownloadPeerFinishedRequest(ctx context.Context, peerID strin // Handle peer with peer finished request. peer.Cost.Store(time.Since(peer.CreatedAt.Load())) - if err := peer.FSM.Event(ctx, resource.PeerEventDownloadSucceeded); err != nil { + if err := peer.FSM.Event(ctx, standard.PeerEventDownloadSucceeded); err != nil { return status.Error(codes.Internal, err.Error()) } @@ -1020,16 +1026,16 @@ func (v *V2) handleDownloadPeerBackToSourceFinishedRequest(ctx context.Context, // Handle peer with peer back-to-source finished request. 
peer.Cost.Store(time.Since(peer.CreatedAt.Load())) - if err := peer.FSM.Event(ctx, resource.PeerEventDownloadSucceeded); err != nil { + if err := peer.FSM.Event(ctx, standard.PeerEventDownloadSucceeded); err != nil { return status.Error(codes.Internal, err.Error()) } // Handle task with peer back-to-source finished request, peer can only represent // a successful task after downloading the complete task. - if peer.Range == nil && !peer.Task.FSM.Is(resource.TaskStateSucceeded) { + if peer.Range == nil && !peer.Task.FSM.Is(standard.TaskStateSucceeded) { peer.Task.ContentLength.Store(int64(req.GetContentLength())) peer.Task.TotalPieceCount.Store(int32(req.GetPieceCount())) - if err := peer.Task.FSM.Event(ctx, resource.TaskEventDownloadSucceeded); err != nil { + if err := peer.Task.FSM.Event(ctx, standard.TaskEventDownloadSucceeded); err != nil { return status.Error(codes.Internal, err.Error()) } } @@ -1052,7 +1058,7 @@ func (v *V2) handleDownloadPeerFailedRequest(ctx context.Context, peerID string) } // Handle peer with peer failed request. - if err := peer.FSM.Event(ctx, resource.PeerEventDownloadFailed); err != nil { + if err := peer.FSM.Event(ctx, standard.PeerEventDownloadFailed); err != nil { return status.Error(codes.Internal, err.Error()) } @@ -1077,7 +1083,7 @@ func (v *V2) handleDownloadPeerBackToSourceFailedRequest(ctx context.Context, pe } // Handle peer with peer back-to-source failed request. 
- if err := peer.FSM.Event(ctx, resource.PeerEventDownloadFailed); err != nil { + if err := peer.FSM.Event(ctx, standard.PeerEventDownloadFailed); err != nil { return status.Error(codes.Internal, err.Error()) } @@ -1085,7 +1091,7 @@ func (v *V2) handleDownloadPeerBackToSourceFailedRequest(ctx context.Context, pe peer.Task.ContentLength.Store(-1) peer.Task.TotalPieceCount.Store(0) peer.Task.DirectPiece = []byte{} - if err := peer.Task.FSM.Event(ctx, resource.TaskEventDownloadFailed); err != nil { + if err := peer.Task.FSM.Event(ctx, standard.TaskEventDownloadFailed); err != nil { return status.Error(codes.Internal, err.Error()) } @@ -1102,7 +1108,7 @@ func (v *V2) handleDownloadPeerBackToSourceFailedRequest(ctx context.Context, pe // handleDownloadPieceFinishedRequest handles DownloadPieceFinishedRequest of AnnouncePeerRequest. func (v *V2) handleDownloadPieceFinishedRequest(peerID string, req *schedulerv2.DownloadPieceFinishedRequest) error { // Construct piece. - piece := &resource.Piece{ + piece := &standard.Piece{ Number: int32(req.Piece.GetNumber()), ParentID: req.Piece.GetParentId(), Offset: req.Piece.GetOffset(), @@ -1163,9 +1169,9 @@ func (v *V2) handleDownloadPieceFinishedRequest(peerID string, req *schedulerv2. } // handleDownloadPieceBackToSourceFinishedRequest handles DownloadPieceBackToSourceFinishedRequest of AnnouncePeerRequest. -func (v *V2) handleDownloadPieceBackToSourceFinishedRequest(ctx context.Context, peerID string, req *schedulerv2.DownloadPieceBackToSourceFinishedRequest) error { +func (v *V2) handleDownloadPieceBackToSourceFinishedRequest(_ context.Context, peerID string, req *schedulerv2.DownloadPieceBackToSourceFinishedRequest) error { // Construct piece. 
- piece := &resource.Piece{ + piece := &standard.Piece{ Number: int32(req.Piece.GetNumber()), ParentID: req.Piece.GetParentId(), Offset: req.Piece.GetOffset(), @@ -1215,7 +1221,7 @@ func (v *V2) handleDownloadPieceBackToSourceFinishedRequest(ctx context.Context, } // handleDownloadPieceFailedRequest handles DownloadPieceFailedRequest of AnnouncePeerRequest. -func (v *V2) handleDownloadPieceFailedRequest(ctx context.Context, peerID string, req *schedulerv2.DownloadPieceFailedRequest) error { +func (v *V2) handleDownloadPieceFailedRequest(_ context.Context, peerID string, req *schedulerv2.DownloadPieceFailedRequest) error { peer, loaded := v.resource.PeerManager().Load(peerID) if !loaded { return status.Errorf(codes.NotFound, "peer %s not found", peerID) @@ -1244,7 +1250,7 @@ func (v *V2) handleDownloadPieceFailedRequest(ctx context.Context, peerID string } // handleDownloadPieceBackToSourceFailedRequest handles DownloadPieceBackToSourceFailedRequest of AnnouncePeerRequest. -func (v *V2) handleDownloadPieceBackToSourceFailedRequest(ctx context.Context, peerID string, req *schedulerv2.DownloadPieceBackToSourceFailedRequest) error { +func (v *V2) handleDownloadPieceBackToSourceFailedRequest(_ context.Context, peerID string, _ *schedulerv2.DownloadPieceBackToSourceFailedRequest) error { peer, loaded := v.resource.PeerManager().Load(peerID) if !loaded { return status.Errorf(codes.NotFound, "peer %s not found", peerID) @@ -1266,7 +1272,7 @@ func (v *V2) handleDownloadPieceBackToSourceFailedRequest(ctx context.Context, p } // handleResource handles resource included host, task, and peer. 
-func (v *V2) handleResource(ctx context.Context, stream schedulerv2.Scheduler_AnnouncePeerServer, hostID, taskID, peerID string, download *commonv2.Download) (*resource.Host, *resource.Task, *resource.Peer, error) { +func (v *V2) handleResource(_ context.Context, stream schedulerv2.Scheduler_AnnouncePeerServer, hostID, taskID, peerID string, download *commonv2.Download) (*standard.Host, *standard.Task, *standard.Peer, error) { // If the host does not exist and the host address cannot be found, // it may cause an exception. host, loaded := v.resource.HostManager().Load(hostID) @@ -1277,7 +1283,7 @@ func (v *V2) handleResource(ctx context.Context, stream schedulerv2.Scheduler_An // Store new task or update task. task, loaded := v.resource.TaskManager().Load(taskID) if !loaded { - options := []resource.TaskOption{resource.WithPieceLength(int32(download.GetPieceLength()))} + options := []standard.TaskOption{standard.WithPieceLength(int32(download.GetPieceLength()))} if download.GetDigest() != "" { d, err := digest.Parse(download.GetDigest()) if err != nil { @@ -1285,10 +1291,10 @@ func (v *V2) handleResource(ctx context.Context, stream schedulerv2.Scheduler_An } // If request has invalid digest, then new task with the nil digest. - options = append(options, resource.WithDigest(d)) + options = append(options, standard.WithDigest(d)) } - task = resource.NewTask(taskID, download.GetUrl(), download.GetTag(), download.GetApplication(), download.GetType(), + task = standard.NewTask(taskID, download.GetUrl(), download.GetTag(), download.GetApplication(), download.GetType(), download.GetFilteredQueryParams(), download.GetRequestHeader(), int32(v.config.Scheduler.BackToSourceCount), options...) v.resource.TaskManager().Store(task) } else { @@ -1300,12 +1306,12 @@ func (v *V2) handleResource(ctx context.Context, stream schedulerv2.Scheduler_An // Store new peer or load peer. 
peer, loaded := v.resource.PeerManager().Load(peerID) if !loaded { - options := []resource.PeerOption{resource.WithPriority(download.GetPriority()), resource.WithAnnouncePeerStream(stream)} + options := []standard.PeerOption{standard.WithPriority(download.GetPriority()), standard.WithAnnouncePeerStream(stream)} if download.GetRange() != nil { - options = append(options, resource.WithRange(http.Range{Start: int64(download.Range.GetStart()), Length: int64(download.Range.GetLength())})) + options = append(options, standard.WithRange(http.Range{Start: int64(download.Range.GetStart()), Length: int64(download.Range.GetLength())})) } - peer = resource.NewPeer(peerID, task, host, options...) + peer = standard.NewPeer(peerID, task, host, options...) v.resource.PeerManager().Store(peer) } @@ -1313,7 +1319,7 @@ func (v *V2) handleResource(ctx context.Context, stream schedulerv2.Scheduler_An } // downloadTaskBySeedPeer downloads task by seed peer. -func (v *V2) downloadTaskBySeedPeer(ctx context.Context, taskID string, download *commonv2.Download, peer *resource.Peer) error { +func (v *V2) downloadTaskBySeedPeer(ctx context.Context, taskID string, download *commonv2.Download, peer *standard.Peer) error { // Trigger the first download task based on different priority levels, // refer to https://github.com/dragonflyoss/api/blob/main/pkg/apis/common/v2/common.proto#L74. priority := peer.CalculatePriority(v.dynconfig) @@ -1393,10 +1399,103 @@ func (v *V2) AnnouncePersistentCachePeer(stream schedulerv2.Scheduler_AnnouncePe return nil } -// TODO Implement the following methods. // StatPersistentCachePeer checks information of persistent cache peer. 
func (v *V2) StatPersistentCachePeer(ctx context.Context, req *schedulerv2.StatPersistentCachePeerRequest) (*commonv2.PersistentCachePeer, error) { - return nil, nil + peer, loaded := v.persistentCacheResource.PeerManager().Load(ctx, req.GetPeerId()) + if !loaded { + return nil, status.Errorf(codes.NotFound, "persistent cache peer %s not found", req.GetPeerId()) + } + + return &commonv2.PersistentCachePeer{ + Id: peer.ID, + Persistent: peer.Persistent, + State: peer.FSM.Current(), + Cost: durationpb.New(peer.Cost), + CreatedAt: timestamppb.New(peer.CreatedAt), + UpdatedAt: timestamppb.New(peer.UpdatedAt), + Task: &commonv2.PersistentCacheTask{ + Id: peer.Task.ID, + PersistentReplicaCount: peer.Task.PersistentReplicaCount, + ReplicaCount: peer.Task.ReplicaCount, + Digest: peer.Task.Digest.String(), + Tag: &peer.Task.Tag, + Application: &peer.Task.Application, + PieceLength: uint64(peer.Task.PieceLength), + ContentLength: uint64(peer.Task.ContentLength), + PieceCount: uint32(peer.Task.TotalPieceCount), + State: peer.Task.FSM.Current(), + CreatedAt: timestamppb.New(peer.Task.CreatedAt), + UpdatedAt: timestamppb.New(peer.Task.UpdatedAt), + }, + Host: &commonv2.Host{ + Id: peer.Host.ID, + Type: uint32(peer.Host.Type), + Hostname: peer.Host.Hostname, + Ip: peer.Host.IP, + Port: peer.Host.Port, + DownloadPort: peer.Host.DownloadPort, + Os: peer.Host.OS, + Platform: peer.Host.Platform, + PlatformFamily: peer.Host.PlatformFamily, + PlatformVersion: peer.Host.PlatformVersion, + KernelVersion: peer.Host.KernelVersion, + Cpu: &commonv2.CPU{ + LogicalCount: peer.Host.CPU.LogicalCount, + PhysicalCount: peer.Host.CPU.PhysicalCount, + Percent: peer.Host.CPU.Percent, + ProcessPercent: peer.Host.CPU.ProcessPercent, + Times: &commonv2.CPUTimes{ + User: peer.Host.CPU.Times.User, + System: peer.Host.CPU.Times.System, + Idle: peer.Host.CPU.Times.Idle, + Nice: peer.Host.CPU.Times.Nice, + Iowait: peer.Host.CPU.Times.Iowait, + Irq: peer.Host.CPU.Times.Irq, + Softirq: 
peer.Host.CPU.Times.Softirq, + Steal: peer.Host.CPU.Times.Steal, + Guest: peer.Host.CPU.Times.Guest, + GuestNice: peer.Host.CPU.Times.GuestNice, + }, + }, + Memory: &commonv2.Memory{ + Total: peer.Host.Memory.Total, + Available: peer.Host.Memory.Available, + Used: peer.Host.Memory.Used, + UsedPercent: peer.Host.Memory.UsedPercent, + ProcessUsedPercent: peer.Host.Memory.ProcessUsedPercent, + Free: peer.Host.Memory.Free, + }, + Network: &commonv2.Network{ + TcpConnectionCount: peer.Host.Network.TCPConnectionCount, + UploadTcpConnectionCount: peer.Host.Network.UploadTCPConnectionCount, + Location: &peer.Host.Network.Location, + Idc: &peer.Host.Network.IDC, + DownloadRate: peer.Host.Network.DownloadRate, + DownloadRateLimit: peer.Host.Network.DownloadRateLimit, + UploadRate: peer.Host.Network.UploadRate, + UploadRateLimit: peer.Host.Network.UploadRateLimit, + }, + Disk: &commonv2.Disk{ + Total: peer.Host.Disk.Total, + Free: peer.Host.Disk.Free, + Used: peer.Host.Disk.Used, + UsedPercent: peer.Host.Disk.UsedPercent, + InodesTotal: peer.Host.Disk.InodesTotal, + InodesUsed: peer.Host.Disk.InodesUsed, + InodesFree: peer.Host.Disk.InodesFree, + InodesUsedPercent: peer.Host.Disk.InodesUsedPercent, + }, + Build: &commonv2.Build{ + GitVersion: peer.Host.Build.GitVersion, + GitCommit: &peer.Host.Build.GitCommit, + GoVersion: &peer.Host.Build.GoVersion, + RustVersion: &peer.Host.Build.RustVersion, + Platform: &peer.Host.Build.Platform, + }, + SchedulerClusterId: uint64(v.config.Manager.SchedulerClusterID), + DisableShared: peer.Host.DisableShared, + }, + }, nil } // TODO Implement the following methods. @@ -1423,10 +1522,27 @@ func (v *V2) UploadPersistentCacheTaskFailed(ctx context.Context, req *scheduler return nil } -// TODO Implement the following methods. // StatPersistentCacheTask checks information of persistent cache task. 
func (v *V2) StatPersistentCacheTask(ctx context.Context, req *schedulerv2.StatPersistentCacheTaskRequest) (*commonv2.PersistentCacheTask, error) { - return nil, nil + task, loaded := v.persistentCacheResource.TaskManager().Load(ctx, req.GetTaskId()) + if !loaded { + return nil, status.Errorf(codes.NotFound, "persistent cache task %s not found", req.GetTaskId()) + } + + return &commonv2.PersistentCacheTask{ + Id: task.ID, + PersistentReplicaCount: task.PersistentReplicaCount, + ReplicaCount: task.ReplicaCount, + Digest: task.Digest.String(), + Tag: &task.Tag, + Application: &task.Application, + PieceLength: uint64(task.PieceLength), + ContentLength: uint64(task.ContentLength), + PieceCount: uint32(task.TotalPieceCount), + State: task.FSM.Current(), + CreatedAt: timestamppb.New(task.CreatedAt), + UpdatedAt: timestamppb.New(task.UpdatedAt), + }, nil } // TODO Implement the following methods. diff --git a/scheduler/service/service_v2_test.go b/scheduler/service/service_v2_test.go index 3a1e6631886..34245153f77 100644 --- a/scheduler/service/service_v2_test.go +++ b/scheduler/service/service_v2_test.go @@ -47,7 +47,8 @@ import ( pkgtypes "d7y.io/dragonfly/v2/pkg/types" "d7y.io/dragonfly/v2/scheduler/config" configmocks "d7y.io/dragonfly/v2/scheduler/config/mocks" - resource "d7y.io/dragonfly/v2/scheduler/resource/standard" + "d7y.io/dragonfly/v2/scheduler/resource/persistentcache" + "d7y.io/dragonfly/v2/scheduler/resource/standard" schedulingmocks "d7y.io/dragonfly/v2/scheduler/scheduling/mocks" storagemocks "d7y.io/dragonfly/v2/scheduler/storage/mocks" ) @@ -71,11 +72,12 @@ func TestService_NewV2(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - resource := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - 
tc.expect(t, NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, scheduling, dynconfig, storage)) + tc.expect(t, NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage)) }) } } @@ -83,25 +85,25 @@ func TestService_NewV2(t *testing.T) { func TestServiceV2_StatPeer(t *testing.T) { tests := []struct { name string - mock func(peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) - expect func(t *testing.T, peer *resource.Peer, resp *commonv2.Peer, err error) + mock func(peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) + expect func(t *testing.T, peer *standard.Peer, resp *commonv2.Peer, err error) }{ { name: "peer not found", - mock: func(peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + mock: func(peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Any()).Return(nil, false).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, resp *commonv2.Peer, err error) { + expect: func(t *testing.T, peer *standard.Peer, resp *commonv2.Peer, err error) { assert := assert.New(t) assert.ErrorIs(err, status.Errorf(codes.NotFound, "peer %s not found", mockPeerID)) }, }, { name: "peer has been loaded", - mock: func(peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + mock: func(peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { peer.StorePiece(&mockPiece) peer.Task.StorePiece(&mockPiece) gomock.InOrder( @@ -109,7 
+111,7 @@ func TestServiceV2_StatPeer(t *testing.T) { mp.Load(gomock.Any()).Return(peer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, resp *commonv2.Peer, err error) { + expect: func(t *testing.T, peer *standard.Peer, resp *commonv2.Peer, err error) { dgst := peer.Task.Digest.String() assert := assert.New(t) @@ -242,18 +244,19 @@ func TestServiceV2_StatPeer(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + peerManager := standard.NewMockPeerManager(ctl) + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockSeedPeerID, mockTask, mockHost, resource.WithRange(mockPeerRange)) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockSeedPeerID, mockTask, mockHost, standard.WithRange(mockPeerRange)) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: 
config.MetricsConfig{EnableHost: true}}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.mock(peer, peerManager, res.EXPECT(), peerManager.EXPECT()) + tc.mock(peer, peerManager, resource.EXPECT(), peerManager.EXPECT()) resp, err := svc.StatPeer(context.Background(), &schedulerv2.StatPeerRequest{TaskId: mockTaskID, PeerId: mockPeerID}) tc.expect(t, peer, resp, err) }) @@ -263,12 +266,12 @@ func TestServiceV2_StatPeer(t *testing.T) { func TestServiceV2_DeletePeer(t *testing.T) { tests := []struct { name string - mock func(peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) + mock func(peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) expect func(t *testing.T, err error) }{ { name: "peer not found", - mock: func(peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + mock: func(peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Any()).Return(nil, false).Times(1), @@ -281,8 +284,8 @@ func TestServiceV2_DeletePeer(t *testing.T) { }, { name: "peer fsm event failed", - mock: func(peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { - peer.FSM.SetState(resource.PeerStateLeave) + mock: func(peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { + peer.FSM.SetState(standard.PeerStateLeave) gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Any()).Return(peer, true).Times(1), @@ -295,7 +298,7 @@ func TestServiceV2_DeletePeer(t *testing.T) { }, { 
name: "peer leaves succeeded", - mock: func(peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + mock: func(peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Any()).Return(peer, true).Times(1), @@ -313,18 +316,19 @@ func TestServiceV2_DeletePeer(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + peerManager := standard.NewMockPeerManager(ctl) + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockSeedPeerID, mockTask, mockHost, resource.WithRange(mockPeerRange)) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockSeedPeerID, mockTask, 
mockHost, standard.WithRange(mockPeerRange)) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.mock(peer, peerManager, res.EXPECT(), peerManager.EXPECT()) + tc.mock(peer, peerManager, resource.EXPECT(), peerManager.EXPECT()) tc.expect(t, svc.DeletePeer(context.Background(), &schedulerv2.DeletePeerRequest{TaskId: mockTaskID, PeerId: mockPeerID})) }) } @@ -333,32 +337,32 @@ func TestServiceV2_DeletePeer(t *testing.T) { func TestServiceV2_StatTask(t *testing.T) { tests := []struct { name string - mock func(task *resource.Task, taskManager resource.TaskManager, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder) - expect func(t *testing.T, task *resource.Task, resp *commonv2.Task, err error) + mock func(task *standard.Task, taskManager standard.TaskManager, mr *standard.MockResourceMockRecorder, mt *standard.MockTaskManagerMockRecorder) + expect func(t *testing.T, task *standard.Task, resp *commonv2.Task, err error) }{ { name: "task not found", - mock: func(task *resource.Task, taskManager resource.TaskManager, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder) { + mock: func(task *standard.Task, taskManager standard.TaskManager, mr *standard.MockResourceMockRecorder, mt *standard.MockTaskManagerMockRecorder) { gomock.InOrder( mr.TaskManager().Return(taskManager).Times(1), mt.Load(gomock.Any()).Return(nil, false).Times(1), ) }, - expect: func(t *testing.T, task *resource.Task, resp *commonv2.Task, err error) { + expect: func(t *testing.T, task *standard.Task, resp *commonv2.Task, err error) { assert := assert.New(t) assert.ErrorIs(err, status.Errorf(codes.NotFound, "task %s not found", mockTaskID)) }, }, { name: "task has been loaded", - mock: func(task *resource.Task, taskManager resource.TaskManager, mr *resource.MockResourceMockRecorder, mt 
*resource.MockTaskManagerMockRecorder) { + mock: func(task *standard.Task, taskManager standard.TaskManager, mr *standard.MockResourceMockRecorder, mt *standard.MockTaskManagerMockRecorder) { task.StorePiece(&mockPiece) gomock.InOrder( mr.TaskManager().Return(taskManager).Times(1), mt.Load(gomock.Any()).Return(task, true).Times(1), ) }, - expect: func(t *testing.T, task *resource.Task, resp *commonv2.Task, err error) { + expect: func(t *testing.T, task *standard.Task, resp *commonv2.Task, err error) { dgst := task.Digest.String() assert := assert.New(t) @@ -401,14 +405,15 @@ func TestServiceV2_StatTask(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - taskManager := resource.NewMockTaskManager(ctl) - task := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduling, dynconfig, storage) + taskManager := standard.NewMockTaskManager(ctl) + task := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.mock(task, taskManager, res.EXPECT(), 
taskManager.EXPECT()) + tc.mock(task, taskManager, resource.EXPECT(), taskManager.EXPECT()) resp, err := svc.StatTask(context.Background(), &schedulerv2.StatTaskRequest{TaskId: mockTaskID}) tc.expect(t, task, resp, err) }) @@ -419,7 +424,7 @@ func TestServiceV2_AnnounceHost(t *testing.T) { tests := []struct { name string req *schedulerv2.AnnounceHostRequest - run func(t *testing.T, svc *V2, req *schedulerv2.AnnounceHostRequest, host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) + run func(t *testing.T, svc *V2, req *schedulerv2.AnnounceHostRequest, host *standard.Host, hostManager standard.HostManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) }{ { name: "host not found", @@ -492,13 +497,13 @@ func TestServiceV2_AnnounceHost(t *testing.T) { }, Interval: durationpb.New(5 * time.Minute), }, - run: func(t *testing.T, svc *V2, req *schedulerv2.AnnounceHostRequest, host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.AnnounceHostRequest, host *standard.Host, hostManager standard.HostManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( md.GetSchedulerClusterClientConfig().Return(managertypes.SchedulerClusterClientConfig{LoadLimit: 10}, nil).Times(1), mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Any()).Return(nil, false).Times(1), mr.HostManager().Return(hostManager).Times(1), - mh.Store(gomock.Any()).Do(func(host *resource.Host) { + mh.Store(gomock.Any()).Do(func(host *standard.Host) { assert := assert.New(t) assert.Equal(host.ID, 
req.Host.Id) assert.Equal(host.Type, pkgtypes.HostType(req.Host.Type)) @@ -604,13 +609,13 @@ func TestServiceV2_AnnounceHost(t *testing.T) { }, Interval: durationpb.New(5 * time.Minute), }, - run: func(t *testing.T, svc *V2, req *schedulerv2.AnnounceHostRequest, host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.AnnounceHostRequest, host *standard.Host, hostManager standard.HostManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( md.GetSchedulerClusterClientConfig().Return(managertypes.SchedulerClusterClientConfig{}, errors.New("foo")).Times(1), mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Any()).Return(nil, false).Times(1), mr.HostManager().Return(hostManager).Times(1), - mh.Store(gomock.Any()).Do(func(host *resource.Host) { + mh.Store(gomock.Any()).Do(func(host *standard.Host) { assert := assert.New(t) assert.Equal(host.ID, req.Host.Id) assert.Equal(host.Type, pkgtypes.HostType(req.Host.Type)) @@ -716,7 +721,7 @@ func TestServiceV2_AnnounceHost(t *testing.T) { }, Interval: durationpb.New(5 * time.Minute), }, - run: func(t *testing.T, svc *V2, req *schedulerv2.AnnounceHostRequest, host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.AnnounceHostRequest, host *standard.Host, hostManager standard.HostManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( md.GetSchedulerClusterClientConfig().Return(managertypes.SchedulerClusterClientConfig{LoadLimit: 10}, nil).Times(1), 
mr.HostManager().Return(hostManager).Times(1), @@ -824,7 +829,7 @@ func TestServiceV2_AnnounceHost(t *testing.T) { }, Interval: durationpb.New(5 * time.Minute), }, - run: func(t *testing.T, svc *V2, req *schedulerv2.AnnounceHostRequest, host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.AnnounceHostRequest, host *standard.Host, hostManager standard.HostManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( md.GetSchedulerClusterClientConfig().Return(managertypes.SchedulerClusterClientConfig{}, errors.New("foo")).Times(1), mr.HostManager().Return(hostManager).Times(1), @@ -868,16 +873,17 @@ func TestServiceV2_AnnounceHost(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - hostManager := resource.NewMockHostManager(ctl) - host := resource.NewHost( + hostManager := standard.NewMockHostManager(ctl) + host := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduling, dynconfig, storage) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, tc.req, host, hostManager, res.EXPECT(), hostManager.EXPECT(), dynconfig.EXPECT()) + 
tc.run(t, svc, tc.req, host, hostManager, resource.EXPECT(), hostManager.EXPECT(), dynconfig.EXPECT()) }) } } @@ -888,16 +894,16 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { tests := []struct { name string req *schedulerv2.RegisterPeerRequest - run func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *resource.Peer, seedPeer *resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, - peerManager resource.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) + run func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *standard.Peer, seedPeer *standard.Peer, hostManager standard.HostManager, taskManager standard.TaskManager, + peerManager standard.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) }{ { name: "host not found", req: &schedulerv2.RegisterPeerRequest{}, - run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *resource.Peer, seedPeer *resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, - peerManager resource.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + 
run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *standard.Peer, seedPeer *standard.Peer, hostManager standard.HostManager, taskManager standard.TaskManager, + peerManager standard.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(peer.Host.ID)).Return(nil, false).Times(1), @@ -915,9 +921,9 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { Digest: &dgst, }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *resource.Peer, seedPeer *resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, - peerManager resource.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *standard.Peer, seedPeer *standard.Peer, hostManager standard.HostManager, taskManager standard.TaskManager, + peerManager standard.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), 
mh.Load(gomock.Eq(peer.Host.ID)).Return(peer.Host, true).Times(1), @@ -941,9 +947,9 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { Digest: &dgst, }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *resource.Peer, seedPeer *resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, - peerManager resource.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *standard.Peer, seedPeer *standard.Peer, hostManager standard.HostManager, taskManager standard.TaskManager, + peerManager standard.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(peer.Host.ID)).Return(peer.Host, true).Times(1), @@ -954,10 +960,10 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { ) peer.Priority = commonv2.Priority_LEVEL1 - peer.Task.FSM.SetState(resource.TaskStateFailed) + peer.Task.FSM.SetState(standard.TaskStateFailed) peer.Task.StorePeer(peer) peer.Task.StorePeer(seedPeer) - seedPeer.FSM.SetState(resource.PeerStateRunning) + seedPeer.FSM.SetState(standard.PeerStateRunning) assert := assert.New(t) assert.ErrorIs(svc.handleRegisterPeerRequest(context.Background(), stream, peer.Host.ID, peer.Task.ID, peer.ID, req), @@ -971,9 +977,9 @@ func 
TestServiceV2_handleRegisterPeerRequest(t *testing.T) { Digest: &dgst, }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *resource.Peer, seedPeer *resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, - peerManager resource.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *standard.Peer, seedPeer *standard.Peer, hostManager standard.HostManager, taskManager standard.TaskManager, + peerManager standard.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(peer.Host.ID)).Return(peer.Host, true).Times(1), @@ -989,8 +995,8 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { assert := assert.New(t) assert.ErrorIs(svc.handleRegisterPeerRequest(context.Background(), nil, peer.Host.ID, peer.Task.ID, peer.ID, req), status.Error(codes.NotFound, "AnnouncePeerStream not found")) - assert.Equal(peer.FSM.Current(), resource.PeerStatePending) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStateRunning) + assert.Equal(peer.FSM.Current(), standard.PeerStatePending) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStateRunning) }, }, { @@ -1000,9 +1006,9 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { Digest: &dgst, }, }, - run: func(t *testing.T, 
svc *V2, req *schedulerv2.RegisterPeerRequest, peer *resource.Peer, seedPeer *resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, - peerManager resource.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *standard.Peer, seedPeer *standard.Peer, hostManager standard.HostManager, taskManager standard.TaskManager, + peerManager standard.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(peer.Host.ID)).Return(peer.Host, true).Times(1), @@ -1015,12 +1021,12 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { peer.Task.ContentLength.Store(0) peer.Priority = commonv2.Priority_LEVEL6 peer.StoreAnnouncePeerStream(stream) - peer.FSM.SetState(resource.PeerStateReceivedEmpty) + peer.FSM.SetState(standard.PeerStateReceivedEmpty) assert := assert.New(t) assert.ErrorIs(svc.handleRegisterPeerRequest(context.Background(), nil, peer.Host.ID, peer.Task.ID, peer.ID, req), status.Errorf(codes.Internal, "event RegisterEmpty inappropriate in current state ReceivedEmpty")) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStateRunning) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStateRunning) }, }, { @@ -1030,9 +1036,9 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { Digest: &dgst, }, 
}, - run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *resource.Peer, seedPeer *resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, - peerManager resource.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *standard.Peer, seedPeer *standard.Peer, hostManager standard.HostManager, taskManager standard.TaskManager, + peerManager standard.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(peer.Host.ID)).Return(peer.Host, true).Times(1), @@ -1054,8 +1060,8 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { assert := assert.New(t) assert.ErrorIs(svc.handleRegisterPeerRequest(context.Background(), nil, peer.Host.ID, peer.Task.ID, peer.ID, req), status.Errorf(codes.Internal, "foo")) - assert.Equal(peer.FSM.Current(), resource.PeerStateReceivedEmpty) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStateRunning) + assert.Equal(peer.FSM.Current(), standard.PeerStateReceivedEmpty) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStateRunning) }, }, { @@ -1066,9 +1072,9 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { NeedBackToSource: true, }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *resource.Peer, seedPeer 
*resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, - peerManager resource.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *standard.Peer, seedPeer *standard.Peer, hostManager standard.HostManager, taskManager standard.TaskManager, + peerManager standard.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(peer.Host.ID)).Return(peer.Host, true).Times(1), @@ -1089,9 +1095,9 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { assert := assert.New(t) assert.NoError(svc.handleRegisterPeerRequest(context.Background(), nil, peer.Host.ID, peer.Task.ID, peer.ID, req)) - assert.Equal(peer.FSM.Current(), resource.PeerStateReceivedNormal) + assert.Equal(peer.FSM.Current(), standard.PeerStateReceivedNormal) assert.Equal(peer.NeedBackToSource.Load(), true) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStateRunning) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStateRunning) }, }, { @@ -1101,9 +1107,9 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { Digest: &dgst, }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *resource.Peer, seedPeer *resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, - peerManager 
resource.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *standard.Peer, seedPeer *standard.Peer, hostManager standard.HostManager, taskManager standard.TaskManager, + peerManager standard.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(peer.Host.ID)).Return(peer.Host, true).Times(1), @@ -1123,8 +1129,8 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { assert := assert.New(t) assert.NoError(svc.handleRegisterPeerRequest(context.Background(), nil, peer.Host.ID, peer.Task.ID, peer.ID, req)) - assert.Equal(peer.FSM.Current(), resource.PeerStateReceivedNormal) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStateRunning) + assert.Equal(peer.FSM.Current(), standard.PeerStateReceivedNormal) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStateRunning) }, }, { @@ -1134,9 +1140,9 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { Digest: &dgst, }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *resource.Peer, seedPeer *resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, - peerManager resource.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, 
- mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.RegisterPeerRequest, peer *standard.Peer, seedPeer *standard.Peer, hostManager standard.HostManager, taskManager standard.TaskManager, + peerManager standard.PeerManager, stream schedulerv2.Scheduler_AnnouncePeerServer, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder, ma *schedulerv2mocks.MockScheduler_AnnouncePeerServerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(peer.Host.ID)).Return(peer.Host, true).Times(1), @@ -1151,8 +1157,8 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { assert := assert.New(t) assert.NoError(svc.handleRegisterPeerRequest(context.Background(), nil, peer.Host.ID, peer.Task.ID, peer.ID, req)) - assert.Equal(peer.FSM.Current(), resource.PeerStateReceivedNormal) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStateRunning) + assert.Equal(peer.FSM.Current(), standard.PeerStateReceivedNormal) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStateRunning) }, }, } @@ -1162,23 +1168,24 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - hostManager := resource.NewMockHostManager(ctl) - peerManager := resource.NewMockPeerManager(ctl) - taskManager := resource.NewMockTaskManager(ctl) + hostManager := 
standard.NewMockHostManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) + taskManager := standard.NewMockTaskManager(ctl) stream := schedulerv2mocks.NewMockScheduler_AnnouncePeerServer(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - seedPeer := resource.NewPeer(mockSeedPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + seedPeer := standard.NewPeer(mockSeedPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, tc.req, peer, seedPeer, hostManager, taskManager, peerManager, stream, res.EXPECT(), hostManager.EXPECT(), taskManager.EXPECT(), peerManager.EXPECT(), stream.EXPECT(), scheduling.EXPECT()) + tc.run(t, svc, tc.req, peer, seedPeer, hostManager, taskManager, peerManager, stream, resource.EXPECT(), hostManager.EXPECT(), taskManager.EXPECT(), peerManager.EXPECT(), stream.EXPECT(), scheduling.EXPECT()) }) } } @@ -1186,11 +1193,11 @@ func TestServiceV2_handleRegisterPeerRequest(t *testing.T) { func TestServiceV2_handleDownloadPeerStartedRequest(t *testing.T) { tests := 
[]struct { name string - run func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) + run func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) }{ { name: "peer can not be loaded", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -1202,14 +1209,14 @@ func TestServiceV2_handleDownloadPeerStartedRequest(t *testing.T) { }, { name: "peer state is PeerStateRunning", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateRunning) + peer.FSM.SetState(standard.PeerStateRunning) assert := assert.New(t) 
assert.NoError(svc.handleDownloadPeerStartedRequest(context.Background(), peer.ID)) @@ -1217,15 +1224,15 @@ func TestServiceV2_handleDownloadPeerStartedRequest(t *testing.T) { }, { name: "task state is TaskStateRunning", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateReceivedNormal) - peer.Task.FSM.SetState(resource.TaskStateRunning) + peer.FSM.SetState(standard.PeerStateReceivedNormal) + peer.Task.FSM.SetState(standard.TaskStateRunning) assert := assert.New(t) assert.NoError(svc.handleDownloadPeerStartedRequest(context.Background(), peer.ID)) @@ -1235,15 +1242,15 @@ func TestServiceV2_handleDownloadPeerStartedRequest(t *testing.T) { }, { name: "task state is TaskStatePending", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - 
peer.FSM.SetState(resource.PeerStateReceivedNormal) - peer.Task.FSM.SetState(resource.TaskStatePending) + peer.FSM.SetState(standard.PeerStateReceivedNormal) + peer.Task.FSM.SetState(standard.TaskStatePending) assert := assert.New(t) assert.NoError(svc.handleDownloadPeerStartedRequest(context.Background(), peer.ID)) @@ -1258,19 +1265,20 @@ func TestServiceV2_handleDownloadPeerStartedRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, peer, peerManager, res.EXPECT(), 
peerManager.EXPECT(), dynconfig.EXPECT()) + tc.run(t, svc, peer, peerManager, resource.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) }) } } @@ -1278,11 +1286,11 @@ func TestServiceV2_handleDownloadPeerStartedRequest(t *testing.T) { func TestServiceV2_handleDownloadPeerBackToSourceStartedRequest(t *testing.T) { tests := []struct { name string - run func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) + run func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) }{ { name: "peer can not be loaded", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -1294,14 +1302,14 @@ func TestServiceV2_handleDownloadPeerBackToSourceStartedRequest(t *testing.T) { }, { name: "peer state is PeerStateRunning", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { 
gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateBackToSource) + peer.FSM.SetState(standard.PeerStateBackToSource) assert := assert.New(t) assert.ErrorIs(svc.handleDownloadPeerBackToSourceStartedRequest(context.Background(), peer.ID), status.Error(codes.Internal, "event DownloadBackToSource inappropriate in current state BackToSource")) @@ -1309,15 +1317,15 @@ func TestServiceV2_handleDownloadPeerBackToSourceStartedRequest(t *testing.T) { }, { name: "task state is TaskStateRunning", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateReceivedNormal) - peer.Task.FSM.SetState(resource.TaskStateRunning) + peer.FSM.SetState(standard.PeerStateReceivedNormal) + peer.Task.FSM.SetState(standard.TaskStateRunning) assert := assert.New(t) assert.NoError(svc.handleDownloadPeerBackToSourceStartedRequest(context.Background(), peer.ID)) @@ -1327,15 +1335,15 @@ func TestServiceV2_handleDownloadPeerBackToSourceStartedRequest(t *testing.T) { }, { name: "task state is TaskStatePending", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md 
*configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateReceivedNormal) - peer.Task.FSM.SetState(resource.TaskStatePending) + peer.FSM.SetState(standard.PeerStateReceivedNormal) + peer.Task.FSM.SetState(standard.TaskStatePending) assert := assert.New(t) assert.NoError(svc.handleDownloadPeerBackToSourceStartedRequest(context.Background(), peer.ID)) @@ -1350,19 +1358,20 @@ func TestServiceV2_handleDownloadPeerBackToSourceStartedRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := 
standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, peer, peerManager, res.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) + tc.run(t, svc, peer, peerManager, resource.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) }) } } @@ -1370,13 +1379,13 @@ func TestServiceV2_handleDownloadPeerBackToSourceStartedRequest(t *testing.T) { func TestServiceV2_handleRescheduleRequest(t *testing.T) { tests := []struct { name string - run func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) + run func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) }{ { name: "peer can not be loaded", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -1388,8 +1397,8 @@ func TestServiceV2_handleRescheduleRequest(t *testing.T) { }, { name: "reschedule failed", - 
run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), @@ -1402,8 +1411,8 @@ func TestServiceV2_handleRescheduleRequest(t *testing.T) { }, { name: "reschedule succeeded", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder, ms *schedulingmocks.MockSchedulingMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), @@ -1421,19 +1430,20 @@ func TestServiceV2_handleRescheduleRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := 
resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, peer, peerManager, res.EXPECT(), peerManager.EXPECT(), scheduling.EXPECT()) + tc.run(t, svc, peer, peerManager, resource.EXPECT(), peerManager.EXPECT(), scheduling.EXPECT()) }) } } @@ -1441,11 +1451,11 @@ func TestServiceV2_handleRescheduleRequest(t *testing.T) { func TestServiceV2_handleDownloadPeerFinishedRequest(t *testing.T) { tests := []struct { name string - run func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) + run func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) }{ { name: "peer can not be loaded", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc 
*V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -1457,13 +1467,13 @@ func TestServiceV2_handleDownloadPeerFinishedRequest(t *testing.T) { }, { name: "peer state is PeerStateSucceeded", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), ) - peer.FSM.SetState(resource.PeerStateSucceeded) + peer.FSM.SetState(standard.PeerStateSucceeded) assert := assert.New(t) assert.ErrorIs(svc.handleDownloadPeerFinishedRequest(context.Background(), peer.ID), status.Error(codes.Internal, "event DownloadSucceeded inappropriate in current state Succeeded")) @@ -1472,18 +1482,18 @@ func TestServiceV2_handleDownloadPeerFinishedRequest(t *testing.T) { }, { name: "peer state is PeerStateRunning", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), 
mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateRunning) + peer.FSM.SetState(standard.PeerStateRunning) assert := assert.New(t) assert.NoError(svc.handleDownloadPeerFinishedRequest(context.Background(), peer.ID)) - assert.Equal(peer.FSM.Current(), resource.PeerStateSucceeded) + assert.Equal(peer.FSM.Current(), standard.PeerStateSucceeded) assert.NotEqual(peer.Cost.Load(), 0) }, }, @@ -1494,19 +1504,20 @@ func TestServiceV2_handleDownloadPeerFinishedRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + 
svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, peer, peerManager, res.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) + tc.run(t, svc, peer, peerManager, resource.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) }) } } @@ -1525,14 +1536,14 @@ func TestServiceV2_handleDownloadPeerBackToSourceFinishedRequest(t *testing.T) { tests := []struct { name string req *schedulerv2.DownloadPeerBackToSourceFinishedRequest - run func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) + run func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) }{ { name: "peer can not be loaded", req: &schedulerv2.DownloadPeerBackToSourceFinishedRequest{}, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -1543,20 +1554,20 @@ func TestServiceV2_handleDownloadPeerBackToSourceFinishedRequest(t *testing.T) { 
assert.Equal(peer.Task.ContentLength.Load(), int64(-1)) assert.Equal(peer.Task.TotalPieceCount.Load(), int32(0)) assert.Equal(len(peer.Task.DirectPiece), 0) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStatePending) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStatePending) }, }, { name: "peer state is PeerStateSucceeded", req: &schedulerv2.DownloadPeerBackToSourceFinishedRequest{}, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), ) - peer.FSM.SetState(resource.PeerStateSucceeded) + peer.FSM.SetState(standard.PeerStateSucceeded) assert := assert.New(t) assert.ErrorIs(svc.handleDownloadPeerBackToSourceFinishedRequest(context.Background(), peer.ID, req), status.Error(codes.Internal, "event DownloadSucceeded inappropriate in current state Succeeded")) @@ -1564,55 +1575,55 @@ func TestServiceV2_handleDownloadPeerBackToSourceFinishedRequest(t *testing.T) { assert.Equal(peer.Task.ContentLength.Load(), int64(-1)) assert.Equal(peer.Task.TotalPieceCount.Load(), int32(0)) assert.Equal(len(peer.Task.DirectPiece), 0) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStatePending) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStatePending) }, }, { name: "peer has range", req: &schedulerv2.DownloadPeerBackToSourceFinishedRequest{}, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, 
peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateRunning) + peer.FSM.SetState(standard.PeerStateRunning) peer.Range = &nethttp.Range{} assert := assert.New(t) assert.NoError(svc.handleDownloadPeerBackToSourceFinishedRequest(context.Background(), peer.ID, req)) assert.NotEqual(peer.Cost.Load(), 0) - assert.Equal(peer.FSM.Current(), resource.PeerStateSucceeded) + assert.Equal(peer.FSM.Current(), standard.PeerStateSucceeded) assert.Equal(peer.Task.ContentLength.Load(), int64(-1)) assert.Equal(peer.Task.TotalPieceCount.Load(), int32(0)) assert.Equal(len(peer.Task.DirectPiece), 0) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStatePending) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStatePending) }, }, { name: "task state is TaskStateSucceeded", req: &schedulerv2.DownloadPeerBackToSourceFinishedRequest{}, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp 
*standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateRunning) - peer.Task.FSM.SetState(resource.TaskStateSucceeded) + peer.FSM.SetState(standard.PeerStateRunning) + peer.Task.FSM.SetState(standard.TaskStateSucceeded) assert := assert.New(t) assert.NoError(svc.handleDownloadPeerBackToSourceFinishedRequest(context.Background(), peer.ID, req)) assert.NotEqual(peer.Cost.Load(), 0) - assert.Equal(peer.FSM.Current(), resource.PeerStateSucceeded) + assert.Equal(peer.FSM.Current(), standard.PeerStateSucceeded) assert.Equal(peer.Task.ContentLength.Load(), int64(-1)) assert.Equal(peer.Task.TotalPieceCount.Load(), int32(0)) assert.Equal(len(peer.Task.DirectPiece), 0) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStateSucceeded) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStateSucceeded) }, }, { @@ -1621,24 +1632,24 @@ func TestServiceV2_handleDownloadPeerBackToSourceFinishedRequest(t *testing.T) { ContentLength: 1024, PieceCount: 10, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), ) - peer.FSM.SetState(resource.PeerStateRunning) - 
peer.Task.FSM.SetState(resource.TaskStatePending) + peer.FSM.SetState(standard.PeerStateRunning) + peer.Task.FSM.SetState(standard.TaskStatePending) assert := assert.New(t) assert.ErrorIs(svc.handleDownloadPeerBackToSourceFinishedRequest(context.Background(), peer.ID, req), status.Error(codes.Internal, "event DownloadSucceeded inappropriate in current state Pending")) assert.NotEqual(peer.Cost.Load(), 0) - assert.Equal(peer.FSM.Current(), resource.PeerStateSucceeded) + assert.Equal(peer.FSM.Current(), standard.PeerStateSucceeded) assert.Equal(peer.Task.ContentLength.Load(), int64(1024)) assert.Equal(peer.Task.TotalPieceCount.Load(), int32(10)) assert.Equal(len(peer.Task.DirectPiece), 0) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStatePending) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStatePending) }, }, { @@ -1647,25 +1658,25 @@ func TestServiceV2_handleDownloadPeerBackToSourceFinishedRequest(t *testing.T) { ContentLength: 1024, PieceCount: 10, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPeerBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateRunning) - peer.Task.FSM.SetState(resource.TaskStateRunning) + peer.FSM.SetState(standard.PeerStateRunning) + peer.Task.FSM.SetState(standard.TaskStateRunning) assert := assert.New(t) 
assert.NoError(svc.handleDownloadPeerBackToSourceFinishedRequest(context.Background(), peer.ID, req)) assert.NotEqual(peer.Cost.Load(), 0) - assert.Equal(peer.FSM.Current(), resource.PeerStateSucceeded) + assert.Equal(peer.FSM.Current(), standard.PeerStateSucceeded) assert.Equal(peer.Task.ContentLength.Load(), int64(1024)) assert.Equal(peer.Task.TotalPieceCount.Load(), int32(10)) assert.Equal(len(peer.Task.DirectPiece), 0) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStateSucceeded) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStateSucceeded) }, }, } @@ -1675,10 +1686,11 @@ func TestServiceV2_handleDownloadPeerBackToSourceFinishedRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) url, err := url.Parse(s.URL) if err != nil { @@ -1695,17 +1707,17 @@ func TestServiceV2_handleDownloadPeerBackToSourceFinishedRequest(t *testing.T) { t.Fatal(err) } - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) mockHost.IP = ip mockHost.DownloadPort = int32(port) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := 
standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, tc.req, peer, peerManager, res.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) + tc.run(t, svc, tc.req, peer, peerManager, resource.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) }) } } @@ -1713,11 +1725,11 @@ func TestServiceV2_handleDownloadPeerBackToSourceFinishedRequest(t *testing.T) { func TestServiceV2_handleDownloadPeerFailedRequest(t *testing.T) { tests := []struct { name string - run func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) + run func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) }{ { name: "peer can not be loaded", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -1729,13 +1741,13 @@ func TestServiceV2_handleDownloadPeerFailedRequest(t 
*testing.T) { }, { name: "peer state is PeerEventDownloadFailed", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), ) - peer.FSM.SetState(resource.PeerEventDownloadFailed) + peer.FSM.SetState(standard.PeerEventDownloadFailed) assert := assert.New(t) assert.ErrorIs(svc.handleDownloadPeerFailedRequest(context.Background(), peer.ID), status.Error(codes.Internal, "event DownloadFailed inappropriate in current state DownloadFailed")) @@ -1743,18 +1755,18 @@ func TestServiceV2_handleDownloadPeerFailedRequest(t *testing.T) { }, { name: "peer state is PeerStateRunning", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateRunning) + peer.FSM.SetState(standard.PeerStateRunning) assert := assert.New(t) assert.NoError(svc.handleDownloadPeerFailedRequest(context.Background(), peer.ID)) - assert.Equal(peer.FSM.Current(), resource.PeerStateFailed) + 
assert.Equal(peer.FSM.Current(), standard.PeerStateFailed) assert.NotEqual(peer.UpdatedAt.Load(), 0) }, }, @@ -1765,19 +1777,20 @@ func TestServiceV2_handleDownloadPeerFailedRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, peer, peerManager, res.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) + tc.run(t, svc, peer, peerManager, resource.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) }) } } @@ -1785,11 +1798,11 @@ func TestServiceV2_handleDownloadPeerFailedRequest(t 
*testing.T) { func TestServiceV2_handleDownloadPeerBackToSourceFailedRequest(t *testing.T) { tests := []struct { name string - run func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) + run func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) }{ { name: "peer can not be loaded", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -1801,8 +1814,8 @@ func TestServiceV2_handleDownloadPeerBackToSourceFailedRequest(t *testing.T) { assert := assert.New(t) assert.ErrorIs(svc.handleDownloadPeerBackToSourceFailedRequest(context.Background(), peer.ID), status.Errorf(codes.NotFound, "peer %s not found", peer.ID)) - assert.Equal(peer.FSM.Current(), resource.PeerStatePending) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStatePending) + assert.Equal(peer.FSM.Current(), standard.PeerStatePending) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStatePending) assert.Equal(peer.Task.ContentLength.Load(), int64(1)) assert.Equal(peer.Task.TotalPieceCount.Load(), int32(1)) assert.Equal(peer.Task.DirectPiece, []byte{1}) @@ -1810,21 +1823,21 @@ func TestServiceV2_handleDownloadPeerBackToSourceFailedRequest(t *testing.T) { }, { name: "peer state 
is PeerStateFailed", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), ) - peer.FSM.SetState(resource.PeerStateFailed) + peer.FSM.SetState(standard.PeerStateFailed) peer.Task.ContentLength.Store(1) peer.Task.TotalPieceCount.Store(1) peer.Task.DirectPiece = []byte{1} assert := assert.New(t) assert.ErrorIs(svc.handleDownloadPeerBackToSourceFailedRequest(context.Background(), peer.ID), status.Error(codes.Internal, "event DownloadFailed inappropriate in current state Failed")) - assert.Equal(peer.FSM.Current(), resource.PeerStateFailed) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStatePending) + assert.Equal(peer.FSM.Current(), standard.PeerStateFailed) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStatePending) assert.Equal(peer.Task.ContentLength.Load(), int64(1)) assert.Equal(peer.Task.TotalPieceCount.Load(), int32(1)) assert.Equal(peer.Task.DirectPiece, []byte{1}) @@ -1832,22 +1845,22 @@ func TestServiceV2_handleDownloadPeerBackToSourceFailedRequest(t *testing.T) { }, { name: "task state is TaskStateFailed", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md 
*configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), ) - peer.FSM.SetState(resource.PeerStateRunning) - peer.Task.FSM.SetState(resource.TaskStateFailed) + peer.FSM.SetState(standard.PeerStateRunning) + peer.Task.FSM.SetState(standard.TaskStateFailed) peer.Task.ContentLength.Store(1) peer.Task.TotalPieceCount.Store(1) peer.Task.DirectPiece = []byte{1} assert := assert.New(t) assert.ErrorIs(svc.handleDownloadPeerBackToSourceFailedRequest(context.Background(), peer.ID), status.Error(codes.Internal, "event DownloadFailed inappropriate in current state Failed")) - assert.Equal(peer.FSM.Current(), resource.PeerStateFailed) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStateFailed) + assert.Equal(peer.FSM.Current(), standard.PeerStateFailed) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStateFailed) assert.Equal(peer.Task.ContentLength.Load(), int64(-1)) assert.Equal(peer.Task.TotalPieceCount.Load(), int32(0)) assert.Equal(peer.Task.DirectPiece, []byte{}) @@ -1855,23 +1868,23 @@ func TestServiceV2_handleDownloadPeerBackToSourceFailedRequest(t *testing.T) { }, { name: "task state is TaskStateRunning", - run: func(t *testing.T, svc *V2, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), md.GetApplications().Return([]*managerv2.Application{}, nil).Times(1), ) - peer.FSM.SetState(resource.PeerStateRunning) - peer.Task.FSM.SetState(resource.TaskStateRunning) + 
peer.FSM.SetState(standard.PeerStateRunning) + peer.Task.FSM.SetState(standard.TaskStateRunning) peer.Task.ContentLength.Store(1) peer.Task.TotalPieceCount.Store(1) peer.Task.DirectPiece = []byte{1} assert := assert.New(t) assert.NoError(svc.handleDownloadPeerBackToSourceFailedRequest(context.Background(), peer.ID)) - assert.Equal(peer.FSM.Current(), resource.PeerStateFailed) - assert.Equal(peer.Task.FSM.Current(), resource.TaskStateFailed) + assert.Equal(peer.FSM.Current(), standard.PeerStateFailed) + assert.Equal(peer.Task.FSM.Current(), standard.TaskStateFailed) assert.Equal(peer.Task.ContentLength.Load(), int64(-1)) assert.Equal(peer.Task.TotalPieceCount.Load(), int32(0)) assert.Equal(peer.Task.DirectPiece, []byte{}) @@ -1884,19 +1897,20 @@ func TestServiceV2_handleDownloadPeerBackToSourceFailedRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, 
commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, peer, peerManager, res.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) + tc.run(t, svc, peer, peerManager, resource.EXPECT(), peerManager.EXPECT(), dynconfig.EXPECT()) }) } } @@ -1905,7 +1919,7 @@ func TestServiceV2_handleDownloadPieceFinishedRequest(t *testing.T) { tests := []struct { name string req *schedulerv2.DownloadPieceFinishedRequest - run func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) + run func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) }{ { name: "invalid digest", @@ -1921,7 +1935,7 @@ func TestServiceV2_handleDownloadPieceFinishedRequest(t *testing.T) { CreatedAt: timestamppb.New(mockPiece.CreatedAt), }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { assert := assert.New(t) assert.ErrorIs(svc.handleDownloadPieceFinishedRequest(peer.ID, req), status.Error(codes.InvalidArgument, "invalid digest")) }, @@ -1940,7 +1954,7 @@ func 
TestServiceV2_handleDownloadPieceFinishedRequest(t *testing.T) { CreatedAt: timestamppb.New(mockPiece.CreatedAt), }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -1964,7 +1978,7 @@ func TestServiceV2_handleDownloadPieceFinishedRequest(t *testing.T) { CreatedAt: timestamppb.New(mockPiece.CreatedAt), }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), @@ -2006,7 +2020,7 @@ func TestServiceV2_handleDownloadPieceFinishedRequest(t *testing.T) { CreatedAt: timestamppb.New(mockPiece.CreatedAt), }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( 
mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), @@ -2042,19 +2056,20 @@ func TestServiceV2_handleDownloadPieceFinishedRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, tc.req, peer, peerManager, res.EXPECT(), peerManager.EXPECT()) + tc.run(t, svc, tc.req, peer, peerManager, resource.EXPECT(), peerManager.EXPECT()) }) } } @@ -2063,7 +2078,7 @@ func TestServiceV2_handleDownloadPieceBackToSourceFinishedRequest(t *testing.T) 
tests := []struct { name string req *schedulerv2.DownloadPieceBackToSourceFinishedRequest - run func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) + run func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) }{ { name: "invalid digest", @@ -2079,7 +2094,7 @@ func TestServiceV2_handleDownloadPieceBackToSourceFinishedRequest(t *testing.T) CreatedAt: timestamppb.New(mockPiece.CreatedAt), }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { assert := assert.New(t) assert.ErrorIs(svc.handleDownloadPieceBackToSourceFinishedRequest(context.Background(), peer.ID, req), status.Error(codes.InvalidArgument, "invalid digest")) }, @@ -2098,7 +2113,7 @@ func TestServiceV2_handleDownloadPieceBackToSourceFinishedRequest(t *testing.T) CreatedAt: timestamppb.New(mockPiece.CreatedAt), }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp 
*standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -2122,7 +2137,7 @@ func TestServiceV2_handleDownloadPieceBackToSourceFinishedRequest(t *testing.T) CreatedAt: timestamppb.New(mockPiece.CreatedAt), }, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFinishedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFinishedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), @@ -2167,19 +2182,20 @@ func TestServiceV2_handleDownloadPieceBackToSourceFinishedRequest(t *testing.T) ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := 
NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, tc.req, peer, peerManager, res.EXPECT(), peerManager.EXPECT()) + tc.run(t, svc, tc.req, peer, peerManager, resource.EXPECT(), peerManager.EXPECT()) }) } } @@ -2188,8 +2204,8 @@ func TestServiceV2_handleDownloadPieceFailedRequest(t *testing.T) { tests := []struct { name string req *schedulerv2.DownloadPieceFailedRequest - run func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFailedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder) + run func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFailedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder) }{ { name: "peer can not be loaded", @@ -2197,8 +2213,8 @@ func TestServiceV2_handleDownloadPieceFailedRequest(t *testing.T) { ParentId: mockSeedPeerID, Temporary: true, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFailedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFailedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( 
mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -2214,8 +2230,8 @@ func TestServiceV2_handleDownloadPieceFailedRequest(t *testing.T) { ParentId: mockSeedPeerID, Temporary: false, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFailedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFailedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), @@ -2231,8 +2247,8 @@ func TestServiceV2_handleDownloadPieceFailedRequest(t *testing.T) { ParentId: mockSeedPeerID, Temporary: true, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFailedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFailedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), @@ -2253,8 +2269,8 @@ func TestServiceV2_handleDownloadPieceFailedRequest(t *testing.T) { ParentId: mockSeedPeerID, Temporary: true, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFailedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceFailedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr 
*standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), @@ -2277,19 +2293,20 @@ func TestServiceV2_handleDownloadPieceFailedRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, tc.req, peer, peerManager, res.EXPECT(), peerManager.EXPECT()) + tc.run(t, svc, tc.req, peer, peerManager, resource.EXPECT(), peerManager.EXPECT()) }) } } @@ 
-2300,14 +2317,14 @@ func TestServiceV2_handleDownloadPieceBackToSourceFailedRequest(t *testing.T) { tests := []struct { name string req *schedulerv2.DownloadPieceBackToSourceFailedRequest - run func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFailedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder) + run func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFailedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder) }{ { name: "peer can not be loaded", req: &schedulerv2.DownloadPieceBackToSourceFailedRequest{}, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFailedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFailedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(nil, false).Times(1), @@ -2322,8 +2339,8 @@ func TestServiceV2_handleDownloadPieceBackToSourceFailedRequest(t *testing.T) { req: &schedulerv2.DownloadPieceBackToSourceFailedRequest{ PieceNumber: &mockPieceNumber, }, - run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFailedRequest, peer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, - mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, req *schedulerv2.DownloadPieceBackToSourceFailedRequest, peer *standard.Peer, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, + mp *standard.MockPeerManagerMockRecorder) { 
gomock.InOrder( mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(peer.ID)).Return(peer, true).Times(1), @@ -2342,19 +2359,20 @@ func TestServiceV2_handleDownloadPieceBackToSourceFailedRequest(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - peerManager := resource.NewMockPeerManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, tc.req, peer, peerManager, res.EXPECT(), peerManager.EXPECT()) + tc.run(t, svc, tc.req, peer, peerManager, resource.EXPECT(), peerManager.EXPECT()) }) } } @@ -2366,16 +2384,16 @@ func TestServiceV2_handleResource(t *testing.T) { 
tests := []struct { name string download *commonv2.Download - run func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, - hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder) + run func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *standard.Host, mockTask *standard.Task, mockPeer *standard.Peer, + hostManager standard.HostManager, taskManager standard.TaskManager, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder) }{ { name: "host can not be loaded", download: &commonv2.Download{}, - run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, - hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *standard.Host, mockTask *standard.Task, mockPeer *standard.Peer, + hostManager standard.HostManager, taskManager standard.TaskManager, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( 
mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(mockHost.ID)).Return(nil, false).Times(1), @@ -2393,9 +2411,9 @@ func TestServiceV2_handleResource(t *testing.T) { FilteredQueryParams: []string{"bar"}, RequestHeader: map[string]string{"baz": "bas"}, }, - run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, - hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *standard.Host, mockTask *standard.Task, mockPeer *standard.Peer, + hostManager standard.HostManager, taskManager standard.TaskManager, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(mockHost.ID)).Return(mockHost, true).Times(1), @@ -2423,9 +2441,9 @@ func TestServiceV2_handleResource(t *testing.T) { RequestHeader: map[string]string{"baz": "bas"}, Digest: &dgst, }, - run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, - hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, download *commonv2.Download, stream 
schedulerv2.Scheduler_AnnouncePeerServer, mockHost *standard.Host, mockTask *standard.Task, mockPeer *standard.Peer, + hostManager standard.HostManager, taskManager standard.TaskManager, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(mockHost.ID)).Return(mockHost, true).Times(1), @@ -2453,9 +2471,9 @@ func TestServiceV2_handleResource(t *testing.T) { download: &commonv2.Download{ Digest: &mismatchDgst, }, - run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, - hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *standard.Host, mockTask *standard.Task, mockPeer *standard.Peer, + hostManager standard.HostManager, taskManager standard.TaskManager, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(mockHost.ID)).Return(mockHost, true).Times(1), @@ -2476,9 +2494,9 @@ func TestServiceV2_handleResource(t *testing.T) { RequestHeader: map[string]string{"baz": "bas"}, Digest: &dgst, }, - run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *resource.Host, mockTask *resource.Task, 
mockPeer *resource.Peer, - hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *standard.Host, mockTask *standard.Task, mockPeer *standard.Peer, + hostManager standard.HostManager, taskManager standard.TaskManager, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Eq(mockHost.ID)).Return(mockHost, true).Times(1), @@ -2513,9 +2531,9 @@ func TestServiceV2_handleResource(t *testing.T) { Length: uint64(mockPeerRange.Length), }, }, - run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, - hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, - mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + run: func(t *testing.T, svc *V2, download *commonv2.Download, stream schedulerv2.Scheduler_AnnouncePeerServer, mockHost *standard.Host, mockTask *standard.Task, mockPeer *standard.Peer, + hostManager standard.HostManager, taskManager standard.TaskManager, peerManager standard.PeerManager, mr *standard.MockResourceMockRecorder, mh *standard.MockHostManagerMockRecorder, + mt *standard.MockTaskManagerMockRecorder, mp *standard.MockPeerManagerMockRecorder) { gomock.InOrder( mr.HostManager().Return(hostManager).Times(1), 
mh.Load(gomock.Eq(mockHost.ID)).Return(mockHost, true).Times(1), @@ -2552,22 +2570,23 @@ func TestServiceV2_handleResource(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - hostManager := resource.NewMockHostManager(ctl) - taskManager := resource.NewMockTaskManager(ctl) - peerManager := resource.NewMockPeerManager(ctl) + hostManager := standard.NewMockHostManager(ctl) + taskManager := standard.NewMockTaskManager(ctl) + peerManager := standard.NewMockPeerManager(ctl) stream := schedulerv2mocks.NewMockScheduler_AnnouncePeerServer(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + mockPeer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig}, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, tc.download, stream, mockHost, 
mockTask, mockPeer, hostManager, taskManager, peerManager, res.EXPECT(), hostManager.EXPECT(), taskManager.EXPECT(), peerManager.EXPECT()) + tc.run(t, svc, tc.download, stream, mockHost, mockTask, mockPeer, hostManager, taskManager, peerManager, resource.EXPECT(), hostManager.EXPECT(), taskManager.EXPECT(), peerManager.EXPECT()) }) } } @@ -2576,7 +2595,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { tests := []struct { name string config config.Config - run func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) + run func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) }{ { name: "priority is Priority_LEVEL6 and enable seed peer", @@ -2585,7 +2604,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: true, }, }, - run: func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { var wg sync.WaitGroup wg.Add(1) defer wg.Wait() @@ -2609,7 +2628,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: true, }, }, - run: func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { var wg sync.WaitGroup wg.Add(1) defer wg.Wait() @@ -2633,7 +2652,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: false, }, }, - run: func(t *testing.T, svc *V2, peer 
*resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { peer.Priority = commonv2.Priority_LEVEL6 assert := assert.New(t) @@ -2648,7 +2667,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: true, }, }, - run: func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { var wg sync.WaitGroup wg.Add(1) defer wg.Wait() @@ -2672,7 +2691,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: true, }, }, - run: func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { var wg sync.WaitGroup wg.Add(1) defer wg.Wait() @@ -2696,7 +2715,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: false, }, }, - run: func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { peer.Priority = commonv2.Priority_LEVEL5 assert := assert.New(t) @@ -2711,7 +2730,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: true, }, }, - run: func(t *testing.T, svc *V2, peer 
*resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { var wg sync.WaitGroup wg.Add(1) defer wg.Wait() @@ -2735,7 +2754,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: true, }, }, - run: func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { var wg sync.WaitGroup wg.Add(1) defer wg.Wait() @@ -2759,7 +2778,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: false, }, }, - run: func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { peer.Priority = commonv2.Priority_LEVEL4 assert := assert.New(t) @@ -2774,7 +2793,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: true, }, }, - run: func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { peer.Priority = commonv2.Priority_LEVEL3 assert := assert.New(t) @@ -2789,7 +2808,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: true, }, }, - run: func(t *testing.T, svc *V2, peer 
*resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { peer.Priority = commonv2.Priority_LEVEL2 assert := assert.New(t) @@ -2803,7 +2822,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: true, }, }, - run: func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { peer.Priority = commonv2.Priority_LEVEL1 assert := assert.New(t) @@ -2817,7 +2836,7 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { Enable: true, }, }, - run: func(t *testing.T, svc *V2, peer *resource.Peer, seedPeerClient resource.SeedPeer, mr *resource.MockResourceMockRecorder, ms *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *V2, peer *standard.Peer, seedPeerClient standard.SeedPeer, mr *standard.MockResourceMockRecorder, ms *standard.MockSeedPeerMockRecorder) { peer.Priority = commonv2.Priority(100) assert := assert.New(t) @@ -2831,19 +2850,20 @@ func TestServiceV2_downloadTaskBySeedPeer(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() scheduling := schedulingmocks.NewMockScheduling(ctl) - res := resource.NewMockResource(ctl) + resource := standard.NewMockResource(ctl) + persistentCacheResource := persistentcache.NewMockResource(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) - seedPeerClient := resource.NewMockSeedPeer(ctl) + seedPeerClient := standard.NewMockSeedPeer(ctl) - mockHost := resource.NewHost( + mockHost := standard.NewHost( mockRawHost.ID, 
mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength)) - peer := resource.NewPeer(mockPeerID, mockTask, mockHost) - svc := NewV2(&tc.config, res, scheduling, dynconfig, storage) + mockTask := standard.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, standard.WithDigest(mockTaskDigest), standard.WithPieceLength(mockTaskPieceLength)) + peer := standard.NewPeer(mockPeerID, mockTask, mockHost) + svc := NewV2(&tc.config, resource, persistentCacheResource, scheduling, dynconfig, storage) - tc.run(t, svc, peer, seedPeerClient, res.EXPECT(), seedPeerClient.EXPECT()) + tc.run(t, svc, peer, seedPeerClient, resource.EXPECT(), seedPeerClient.EXPECT()) }) } }