Add tests for ListHosts() and DeleteHost() #3604

Merged: 1 commit, Oct 23, 2024
193 changes: 193 additions & 0 deletions scheduler/service/service_v2_test.go
@@ -882,6 +882,199 @@ func TestServiceV2_AnnounceHost(t *testing.T) {
}
}

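// TestServiceV2_ListHosts verifies ListHosts: the service walks the host
// manager via Range and converts every stored *resource.Host into its
// commonv2.Host representation in the response.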
func TestServiceV2_ListHosts(t *testing.T) {
tests := []struct {
name string
mock func(host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder)
expect func(t *testing.T, host *resource.Host, resp []*commonv2.Host, err error)
}{
{
name: "host loading unsuccessful",
mock: func(host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder) {
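				// Range yields a nil value, so no host should be collected into the response.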
gomock.InOrder(
mr.HostManager().Return(hostManager).Times(1),
mh.Range(gomock.Any()).Do(func(f func(key, value any) bool) {
f(nil, nil)
}).Return().Times(1),
)
},
expect: func(t *testing.T, host *resource.Host, resp []*commonv2.Host, err error) {
assert := assert.New(t)
assert.NoError(err)
assert.Equal(len(resp), 0)
},
},
{
name: "host loading successful",
mock: func(host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder) {
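				// Range yields the mock host, which should come back converted in the response.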
gomock.InOrder(
mr.HostManager().Return(hostManager).Times(1),
mh.Range(gomock.Any()).Do(func(f func(key, value any) bool) {
f(nil, host)
}).Return().Times(1),
)
},
expect: func(t *testing.T, host *resource.Host, resp []*commonv2.Host, err error) {
assert := assert.New(t)
assert.NoError(err)
assert.Equal(len(resp), 1)
assert.EqualValues(resp[0], &commonv2.Host{
Id: mockHostID,
Type: uint32(pkgtypes.HostTypeNormal),
Hostname: "foo",
Ip: "127.0.0.1",
Port: 8003,
DownloadPort: mockRawHost.DownloadPort,
Cpu: &commonv2.CPU{
LogicalCount: mockCPU.LogicalCount,
PhysicalCount: mockCPU.PhysicalCount,
Percent: mockCPU.Percent,
ProcessPercent: mockCPU.ProcessPercent,
Times: &commonv2.CPUTimes{
User: mockCPU.Times.User,
System: mockCPU.Times.System,
Idle: mockCPU.Times.Idle,
Nice: mockCPU.Times.Nice,
Iowait: mockCPU.Times.Iowait,
Irq: mockCPU.Times.Irq,
Softirq: mockCPU.Times.Softirq,
Steal: mockCPU.Times.Steal,
Guest: mockCPU.Times.Guest,
GuestNice: mockCPU.Times.GuestNice,
},
},
Memory: &commonv2.Memory{
Total: mockMemory.Total,
Available: mockMemory.Available,
Used: mockMemory.Used,
UsedPercent: mockMemory.UsedPercent,
ProcessUsedPercent: mockMemory.ProcessUsedPercent,
Free: mockMemory.Free,
},
Network: &commonv2.Network{
TcpConnectionCount: mockNetwork.TCPConnectionCount,
UploadTcpConnectionCount: mockNetwork.UploadTCPConnectionCount,
Location: &mockNetwork.Location,
Idc: &mockNetwork.IDC,
DownloadRate: mockNetwork.DownloadRate,
DownloadRateLimit: mockNetwork.DownloadRateLimit,
UploadRate: mockNetwork.UploadRate,
UploadRateLimit: mockNetwork.UploadRateLimit,
},
Disk: &commonv2.Disk{
Total: mockDisk.Total,
Free: mockDisk.Free,
Used: mockDisk.Used,
UsedPercent: mockDisk.UsedPercent,
InodesTotal: mockDisk.InodesTotal,
InodesUsed: mockDisk.InodesUsed,
InodesFree: mockDisk.InodesFree,
InodesUsedPercent: mockDisk.InodesUsedPercent,
},
Build: &commonv2.Build{
GitVersion: mockBuild.GitVersion,
GitCommit: &mockBuild.GitCommit,
GoVersion: &mockBuild.GoVersion,
Platform: &mockBuild.Platform,
},
})
},
},
}

for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ctl := gomock.NewController(t)
defer ctl.Finish()
scheduling := schedulingmocks.NewMockScheduling(ctl)
res := resource.NewMockResource(ctl)
dynconfig := configmocks.NewMockDynconfigInterface(ctl)
storage := storagemocks.NewMockStorage(ctl)
hostManager := resource.NewMockHostManager(ctl)
host := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type,
resource.WithCPU(mockCPU), resource.WithMemory(mockMemory), resource.WithNetwork(mockNetwork), resource.WithDisk(mockDisk), resource.WithBuild(mockBuild))
svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduling, dynconfig, storage)

tc.mock(host, hostManager, res.EXPECT(), hostManager.EXPECT())
resp, err := svc.ListHosts(context.Background())
tc.expect(t, host, resp.Hosts, err)
})
}
}

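// TestServiceV2_DeleteHost verifies DeleteHost: an unknown host ID yields an
// error, a host without peers is removed cleanly, and any peers still on the
// host are transitioned to the leave state.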
func TestServiceV2_DeleteHost(t *testing.T) {
tests := []struct {
name string
mock func(host *resource.Host, mockPeer *resource.Peer, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder)
expect func(t *testing.T, peer *resource.Peer, err error)
}{
{
name: "host not found",
mock: func(host *resource.Host, mockPeer *resource.Peer, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder) {
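				// Load reports a miss, so DeleteHost should return an error.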
gomock.InOrder(
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Any()).Return(nil, false).Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer, err error) {
assert := assert.New(t)
assert.Error(err)
},
},
{
name: "host has not peers",
mock: func(host *resource.Host, mockPeer *resource.Peer, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder) {
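				// Load succeeds and the host has no peers, so DeleteHost should return nil.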
gomock.InOrder(
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Any()).Return(host, true).Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer, err error) {
assert := assert.New(t)
assert.NoError(err)
},
},
{
name: "peer leaves succeeded",
mock: func(host *resource.Host, mockPeer *resource.Peer, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder) {
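				// Seed the host with a pending peer; DeleteHost should transition it to the leave state.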
host.Peers.Store(mockPeer.ID, mockPeer)
mockPeer.FSM.SetState(resource.PeerStatePending)
gomock.InOrder(
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Any()).Return(host, true).Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer, err error) {
assert := assert.New(t)
assert.NoError(err)
assert.Equal(peer.FSM.Current(), resource.PeerStateLeave)
},
},
}

for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ctl := gomock.NewController(t)
defer ctl.Finish()
scheduling := schedulingmocks.NewMockScheduling(ctl)
res := resource.NewMockResource(ctl)
dynconfig := configmocks.NewMockDynconfigInterface(ctl)
storage := storagemocks.NewMockStorage(ctl)
hostManager := resource.NewMockHostManager(ctl)
host := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength))
mockPeer := resource.NewPeer(mockSeedPeerID, mockTask, host)
svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduling, dynconfig, storage)

tc.mock(host, mockPeer, hostManager, res.EXPECT(), hostManager.EXPECT())
tc.expect(t, mockPeer, svc.DeleteHost(context.Background(), &schedulerv2.DeleteHostRequest{HostId: mockHostID}))
})
}
}

func TestServiceV2_handleRegisterPeerRequest(t *testing.T) {
dgst := mockTaskDigest.String()

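Both new unit tests follow the project's table-driven pattern: each case installs gomock expectations on the mocked resource layer, invokes the service method, and asserts on the outcome. Below is a minimal, self-contained sketch of that pattern, using a hand-written stub and a hypothetical HostStore interface in place of the generated mocks; none of these names come from the PR.

package service_test

import (
	"errors"
	"testing"
)

// HostStore is a hypothetical stand-in for the scheduler's resource.HostManager;
// the name and signature are illustrative only.
type HostStore interface {
	Load(id string) (string, bool)
}

// stubStore is a hand-written fake. The real tests use mockgen-generated
// mocks (resource.NewMockHostManager) with gomock expectations instead.
type stubStore struct{ hosts map[string]string }

func (s stubStore) Load(id string) (string, bool) {
	h, ok := s.hosts[id]
	return h, ok
}

// deleteHost mirrors the shape of the method under test: error when the
// host is unknown, nil otherwise.
func deleteHost(store HostStore, id string) error {
	if _, ok := store.Load(id); !ok {
		return errors.New("host not found")
	}
	return nil
}

func TestDeleteHost(t *testing.T) {
	tests := []struct {
		name    string
		store   stubStore
		wantErr bool
	}{
		{name: "host not found", store: stubStore{hosts: map[string]string{}}, wantErr: true},
		{name: "host exists", store: stubStore{hosts: map[string]string{"h1": "foo"}}, wantErr: false},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if err := deleteHost(tc.store, "h1"); (err != nil) != tc.wantErr {
				t.Fatalf("deleteHost() error = %v, wantErr %v", err, tc.wantErr)
			}
		})
	}
}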
8 changes: 4 additions & 4 deletions test/e2e/v2/host_test.go
@@ -44,7 +44,7 @@ var _ = Describe("Clients Leaving", func() {
Expect(err).NotTo(HaveOccurred())

// Add taint to master node to prevent new client from starting.
-out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule").CombinedOutput()
+out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "E2E:NoSchedule").CombinedOutput()
fmt.Println(string(out))
Expect(err).NotTo(HaveOccurred())

@@ -58,7 +58,7 @@ var _ = Describe("Clients Leaving", func() {
Expect(calculateNormalHostCountFromScheduler(schedulerClient)).To(Equal(hostCount - 1))

// Remove taint in master node.
-out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule-").CombinedOutput()
+out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "E2E:NoSchedule-").CombinedOutput()
fmt.Println(string(out))
Expect(err).NotTo(HaveOccurred())

@@ -79,7 +79,7 @@ var _ = Describe("Clients Leaving", func() {
Expect(err).NotTo(HaveOccurred())

// Add taint to master node to prevent new client from starting.
-out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule").CombinedOutput()
+out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "E2E:NoSchedule").CombinedOutput()
fmt.Println(string(out))
Expect(err).NotTo(HaveOccurred())

@@ -93,7 +93,7 @@ var _ = Describe("Clients Leaving", func() {
Expect(calculateNormalHostCountFromScheduler(schedulerClient)).To(Equal(hostCount - 1))

// Remove taint in master node.
-out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule-").CombinedOutput()
+out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "E2E:NoSchedule-").CombinedOutput()
fmt.Println(string(out))
Expect(err).NotTo(HaveOccurred())
