diff --git a/Dockerfile b/Dockerfile
index 25b314a111..3ce8f60036 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -42,6 +42,8 @@ RUN mkdir -p /output/usr/bin && \
 	export GOARM=$( echo "${GOARM}" | cut -c2-) && \
 	go build -o /output/${BIN} \
 	-ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN} && \
+	go build -o /output/velero-restore-helper \
+	-ldflags "${LDFLAGS}" ${PKG}/cmd/velero-restore-helper && \
 	go build -o /output/velero-helper \
 	-ldflags "${LDFLAGS}" ${PKG}/cmd/velero-helper && \
 	go clean -modcache -cache
diff --git a/Makefile b/Makefile
index 67e528bf8e..2c68bd947f 100644
--- a/Makefile
+++ b/Makefile
@@ -148,17 +148,14 @@ GOBIN=$$(pwd)/.go/bin
 # If you want to build all containers, see the 'all-containers' rule.
 all:
 	@$(MAKE) build
-	@$(MAKE) build BIN=velero-restore-helper

 build-%:
 	@$(MAKE) --no-print-directory ARCH=$* build
-	@$(MAKE) --no-print-directory ARCH=$* build BIN=velero-restore-helper

 all-build: $(addprefix build-, $(CLI_PLATFORMS))

 all-containers:
 	@$(MAKE) --no-print-directory container
-	@$(MAKE) --no-print-directory container BIN=velero-restore-helper

 local: build-dirs # Add DEBUG=1 to enable debug locally
diff --git a/changelogs/unreleased/8518-Lyndon-Li b/changelogs/unreleased/8518-Lyndon-Li
new file mode 100644
index 0000000000..94a8a01587
--- /dev/null
+++ b/changelogs/unreleased/8518-Lyndon-Li
@@ -0,0 +1 @@
+Make fs-backup work on linux nodes with the new Velero deployment and disable fs-backup if the source/target pod is running in non-linux node (#8424)
\ No newline at end of file
diff --git a/changelogs/unreleased/8555-Lyndon-Li b/changelogs/unreleased/8555-Lyndon-Li
new file mode 100644
index 0000000000..b209289b7d
--- /dev/null
+++ b/changelogs/unreleased/8555-Lyndon-Li
@@ -0,0 +1 @@
+Fix issue #8418, support data mover backup for Windows nodes
\ No newline at end of file
diff --git a/changelogs/unreleased/8569-Lyndon-Li b/changelogs/unreleased/8569-Lyndon-Li
new file mode 100644
index 0000000000..336a9856c9
--- /dev/null
+++ b/changelogs/unreleased/8569-Lyndon-Li
@@ -0,0 +1 @@
+fs uploader and block uploader support Windows nodes
\ No newline at end of file
diff --git a/changelogs/unreleased/8574-ywk253100 b/changelogs/unreleased/8574-ywk253100
new file mode 100644
index 0000000000..c6f3b7bc40
--- /dev/null
+++ b/changelogs/unreleased/8574-ywk253100
@@ -0,0 +1 @@
+Merge restore helper image into Velero server image
\ No newline at end of file
diff --git a/changelogs/unreleased/8575-ywk253100 b/changelogs/unreleased/8575-ywk253100
new file mode 100644
index 0000000000..30916f4e09
--- /dev/null
+++ b/changelogs/unreleased/8575-ywk253100
@@ -0,0 +1 @@
+Clear validation errors when schedule is valid
\ No newline at end of file
diff --git a/hack/fix_restic_cve.txt b/hack/fix_restic_cve.txt
index aca7982590..f974f098ba 100644
--- a/hack/fix_restic_cve.txt
+++ b/hack/fix_restic_cve.txt
@@ -1,5 +1,5 @@
 diff --git a/go.mod b/go.mod
-index 5f939c481..1caa51275 100644
+index 5f939c481..95d29c82b 100644
 --- a/go.mod
 +++ b/go.mod
 @@ -24,32 +24,32 @@ require (
@@ -9,17 +9,18 @@ index 5f939c481..1caa51275 100644
 -	golang.org/x/crypto v0.5.0
 -	golang.org/x/net v0.5.0
 -	golang.org/x/oauth2 v0.4.0
-+	golang.org/x/crypto v0.21.0
-+	golang.org/x/net v0.23.0
-+	golang.org/x/oauth2 v0.7.0
- 	golang.org/x/sync v0.1.0
+-	golang.org/x/sync v0.1.0
 -	golang.org/x/sys v0.4.0
 -	golang.org/x/term v0.4.0
 -	golang.org/x/text v0.6.0
 -	google.golang.org/api v0.106.0
-+	golang.org/x/sys v0.18.0
-+	golang.org/x/term v0.18.0
-+	golang.org/x/text v0.14.0
++	golang.org/x/crypto v0.31.0
++	golang.org/x/net v0.33.0
++	golang.org/x/oauth2 v0.7.0
++	golang.org/x/sync v0.10.0
++	golang.org/x/sys v0.28.0
++	golang.org/x/term v0.27.0
++	golang.org/x/text v0.21.0
 +	google.golang.org/api v0.114.0
 )
@@ -62,7 +63,7 @@ index 5f939c481..1caa51275 100644
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 diff --git a/go.sum b/go.sum
-index 026e1d2fa..27d4207f4 100644
+index 026e1d2fa..d164b17e6 100644
 --- a/go.sum
 +++ b/go.sum
 @@ -1,13 +1,13 @@
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
 -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
-+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
++golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
++golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-@@ -189,11 +189,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
+@@ -189,17 +189,17 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
 -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
-+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
++golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
++golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
 -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
@@ -147,27 +148,35 @@ index 026e1d2fa..27d4207f4 100644
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
++golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
++golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -214,17 +214,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
 -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
++golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
++golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 -golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
 -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
-+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
++golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
++golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
 -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
++golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
++golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
diff --git a/internal/velero/images.go b/internal/velero/images.go
index b21d4aff8c..cefbd14412 100644
--- a/internal/velero/images.go
+++ b/internal/velero/images.go
@@ -43,9 +43,3 @@ func ImageTag() string {
 func DefaultVeleroImage() string {
 	return fmt.Sprintf("%s/%s:%s", imageRegistry(), "velero", ImageTag())
 }
-
-// DefaultRestoreHelperImage returns the default container image to use for the restore helper
-// for this version of Velero.
-func DefaultRestoreHelperImage() string {
-	return fmt.Sprintf("%s/%s:%s", imageRegistry(), "velero-restore-helper", ImageTag())
-}
diff --git a/internal/velero/images_test.go b/internal/velero/images_test.go
index 4e63dc543d..5a68a4fd1b 100644
--- a/internal/velero/images_test.go
+++ b/internal/velero/images_test.go
@@ -134,7 +134,3 @@ func testDefaultImage(t *testing.T, defaultImageFn func() string, imageName stri
 func TestDefaultVeleroImage(t *testing.T) {
 	testDefaultImage(t, DefaultVeleroImage, "velero")
 }
-
-func TestDefaultRestoreHelperImage(t *testing.T) {
-	testDefaultImage(t, DefaultRestoreHelperImage, "velero-restore-helper")
-}
diff --git a/pkg/cmd/cli/datamover/backup.go b/pkg/cmd/cli/datamover/backup.go
index 4d704b04c1..7511fef8ed 100644
--- a/pkg/cmd/cli/datamover/backup.go
+++ b/pkg/cmd/cli/datamover/backup.go
@@ -168,7 +168,24 @@ func newdataMoverBackup(logger logrus.FieldLogger, factory client.Factory, confi
 		return nil, errors.Wrap(err, "error to create client")
 	}

-	cache, err := ctlcache.New(clientConfig, cacheOption)
+	var cache ctlcache.Cache
+	retry := 10
+	for {
+		cache, err = ctlcache.New(clientConfig, cacheOption)
+		if err == nil {
+			break
+		}
+
+		retry--
+		if retry == 0 {
+			break
+		}
+
+		logger.WithError(err).Warn("Failed to create client cache, need retry")
+
+		time.Sleep(time.Second)
+	}
+
 	if err != nil {
 		cancelFunc()
 		return nil, errors.Wrap(err, "error to create client cache")
diff --git a/pkg/cmd/cli/install/install.go b/pkg/cmd/cli/install/install.go
index c5d5feb339..f455d6d4f3 100644
--- a/pkg/cmd/cli/install/install.go
+++ b/pkg/cmd/cli/install/install.go
@@ -398,7 +398,9 @@ func (o *Options) Run(c *cobra.Command, f client.Factory) error {
 		if _, err = install.NodeAgentIsReady(dynamicFactory, o.Namespace); err != nil {
 			return errors.Wrap(err, errorMsg)
 		}
+	}

+	if o.UseNodeAgentWindows {
 		fmt.Println("Waiting for node-agent-windows daemonset to be ready.")
 		if _, err = install.NodeAgentWindowsIsReady(dynamicFactory, o.Namespace); err != nil {
 			return errors.Wrap(err, errorMsg)
diff --git a/pkg/cmd/cli/repomantenance/maintenance.go b/pkg/cmd/cli/repomantenance/maintenance.go
index 2d9287296d..5e83a079c4 100644
--- a/pkg/cmd/cli/repomantenance/maintenance.go
+++ b/pkg/cmd/cli/repomantenance/maintenance.go
@@ -65,6 +65,8 @@ func (o *Options) Run(f velerocli.Factory) {
 	logger := logging.DefaultLogger(o.LogLevelFlag.Parse(), o.FormatFlag.Parse())
 	logger.SetOutput(os.Stdout)

+	time.Sleep(time.Minute)
+
 	pruneError := o.runRepoPrune(f, f.Namespace(), logger)
 	defer func() {
 		if pruneError != nil {
diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go
index 1c5b061328..b0f8aae3f6 100644
--- a/pkg/cmd/server/plugin/plugin.go
+++ b/pkg/cmd/server/plugin/plugin.go
@@ -295,7 +295,7 @@ func newPodVolumeRestoreItemAction(f client.Factory) plugincommon.HandlerInitial
 			return nil, err
 		}

-		return ria.NewPodVolumeRestoreAction(logger, client.CoreV1().ConfigMaps(f.Namespace()), crClient), nil
+		return ria.NewPodVolumeRestoreAction(logger, client.CoreV1().ConfigMaps(f.Namespace()), crClient, f.Namespace())
 	}
 }
diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go
index 01a2230d11..82c876761d 100644
--- a/pkg/cmd/server/server.go
+++ b/pkg/cmd/server/server.go
@@ -82,6 +82,7 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/restore"
 	"github.com/vmware-tanzu/velero/pkg/uploader"
 	"github.com/vmware-tanzu/velero/pkg/util/filesystem"
+	"github.com/vmware-tanzu/velero/pkg/util/kube"
 	"github.com/vmware-tanzu/velero/pkg/util/logging"
 )

@@ -471,10 +472,20 @@ func (s *server) veleroResourcesExist() error {
 func (s *server) checkNodeAgent() {
 	// warn if node agent does not exist
-	if err := nodeagent.IsRunning(s.ctx, s.kubeClient, s.namespace); err == nodeagent.ErrDaemonSetNotFound {
-		s.logger.Warn("Velero node agent not found; pod volume backups/restores will not work until it's created")
-	} else if err != nil {
-		s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of velero node agent")
+	if kube.WithLinuxNode(s.ctx, s.crClient, s.logger) {
+		if err := nodeagent.IsRunningOnLinux(s.ctx, s.kubeClient, s.namespace); err == nodeagent.ErrDaemonSetNotFound {
+			s.logger.Warn("Velero node agent not found for linux nodes; pod volume backups/restores and data mover backups/restores will not work until it's created")
+		} else if err != nil {
+			s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of velero node agent for linux nodes")
+		}
+	}
+
+	if kube.WithWindowsNode(s.ctx, s.crClient, s.logger) {
+		if err := nodeagent.IsRunningOnWindows(s.ctx, s.kubeClient, s.namespace); err == nodeagent.ErrDaemonSetNotFound {
+			s.logger.Warn("Velero node agent not found for Windows nodes; pod volume backups/restores and data mover backups/restores will not work until it's created")
+		} else if err != nil {
+			s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of velero node agent for Windows nodes")
+		}
 	}
 }
diff --git a/pkg/controller/data_download_controller.go b/pkg/controller/data_download_controller.go
index 347bcfed58..45b367cd8a 100644
--- a/pkg/controller/data_download_controller.go
+++ b/pkg/controller/data_download_controller.go
@@ -182,7 +182,7 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
 	hostingPodLabels := map[string]string{velerov1api.DataDownloadLabel: dd.Name}
 	for _, k := range util.ThirdPartyLabels {
-		if v, err := nodeagent.GetLabelValue(ctx, r.kubeClient, dd.Namespace, k); err != nil {
+		if v, err := nodeagent.GetLabelValue(ctx, r.kubeClient, dd.Namespace, k, kube.NodeOSLinux); err != nil {
 			if err != nodeagent.ErrNodeAgentLabelNotFound {
 				log.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
 			}
diff --git a/pkg/controller/data_upload_controller.go b/pkg/controller/data_upload_controller.go
index 41795f9d15..66f5b67f76 100644
--- a/pkg/controller/data_upload_controller.go
+++ b/pkg/controller/data_upload_controller.go
@@ -803,6 +803,15 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
 		return nil, errors.Wrapf(err, "failed to get PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
 	}

+	nodeOS, err := kube.GetPVCAttachingNodeOS(pvc, r.kubeClient.CoreV1(), r.kubeClient.StorageV1(), r.logger)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get attaching node OS for PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
+	}
+
+	if err := kube.HasNodeWithOS(context.Background(), nodeOS, r.kubeClient.CoreV1()); err != nil {
+		return nil, errors.Wrapf(err, "no appropriate node to run data upload for PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC)
+	}
+
 	accessMode := exposer.AccessModeFileSystem
 	if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1.PersistentVolumeBlock {
 		accessMode = exposer.AccessModeBlock
@@ -810,7 +819,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
 	hostingPodLabels := map[string]string{velerov1api.DataUploadLabel: du.Name}
 	for _, k := range util.ThirdPartyLabels {
-		if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k); err != nil {
+		if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k, nodeOS); err != nil {
 			if err != nodeagent.ErrNodeAgentLabelNotFound {
 				r.logger.WithError(err).Warnf("Failed to check node-agent label, skip adding host pod label %s", k)
 			}
@@ -831,6 +840,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
 			Affinity: r.loadAffinity,
 			BackupPVCConfig: r.backupPVCConfig,
 			Resources: r.podResources,
+			NodeOS: nodeOS,
 		}, nil
 	}
 	return nil, nil
diff --git a/pkg/controller/data_upload_controller_test.go b/pkg/controller/data_upload_controller_test.go
index 8e3b1688b6..f480a692c5 100644
--- a/pkg/controller/data_upload_controller_test.go
+++ b/pkg/controller/data_upload_controller_test.go
@@ -59,6 +59,7 @@ import (
 	velerotest "github.com/vmware-tanzu/velero/pkg/test"
 	"github.com/vmware-tanzu/velero/pkg/uploader"
 	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
+	"github.com/vmware-tanzu/velero/pkg/util/kube"
 )

 const dataUploadName = "dataupload-1"
@@ -187,6 +188,8 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
 		},
 	}

+	node := builder.ForNode("fake-node").Labels(map[string]string{kube.NodeOSLabel: kube.NodeOSLinux}).Result()
+
 	dataPathMgr := datapath.NewManager(1)

 	now, err := time.Parse(time.RFC1123, time.RFC1123)
@@ -229,7 +232,7 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
 	}

 	fakeSnapshotClient := snapshotFake.NewSimpleClientset(vsObject, vscObj)
-	fakeKubeClient := clientgofake.NewSimpleClientset(daemonSet)
+	fakeKubeClient := clientgofake.NewSimpleClientset(daemonSet, node)

 	return NewDataUploadReconciler(
 		fakeClient,
diff --git a/pkg/controller/schedule_controller.go b/pkg/controller/schedule_controller.go
index 4b89e98a3e..799a8c77a2 100644
--- a/pkg/controller/schedule_controller.go
+++ b/pkg/controller/schedule_controller.go
@@ -128,6 +128,7 @@ func (c *scheduleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 		schedule.Status.ValidationErrors = errs
 	} else {
 		schedule.Status.Phase = velerov1.SchedulePhaseEnabled
+		schedule.Status.ValidationErrors = nil
 	}

 	scheduleNeedsPatch := false
diff --git a/pkg/exposer/csi_snapshot.go b/pkg/exposer/csi_snapshot.go
index 9b9ebe5479..043462792d 100644
--- a/pkg/exposer/csi_snapshot.go
+++ b/pkg/exposer/csi_snapshot.go
@@ -73,6 +73,9 @@ type CSISnapshotExposeParam struct {

 	// Resources defines the resource requirements of the hosting pod
 	Resources corev1.ResourceRequirements
+
+	// NodeOS specifies the OS of node that the source volume is attaching
+	NodeOS string
 }

 // CSISnapshotExposeWaitParam define the input param for WaitExposed of CSI snapshots
@@ -212,6 +215,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.Obje
 		csiExposeParam.Resources,
 		backupPVCReadOnly,
 		spcNoRelabeling,
+		csiExposeParam.NodeOS,
 	)
 	if err != nil {
 		return errors.Wrap(err, "error to create backup pod")
@@ -517,13 +521,14 @@ func (e *csiSnapshotExposer) createBackupPod(
 	resources corev1.ResourceRequirements,
 	backupPVCReadOnly bool,
 	spcNoRelabeling bool,
+	nodeOS string,
 ) (*corev1.Pod, error) {
 	podName := ownerObject.Name
 	containerName := string(ownerObject.UID)
 	volumeName := string(ownerObject.UID)

-	podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace)
+	podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace, nodeOS)
 	if err != nil {
 		return nil, errors.Wrap(err, "error to get inherited pod info from node-agent")
 	}
@@ -567,13 +572,40 @@ func (e *csiSnapshotExposer) createBackupPod(
 	args = append(args, podInfo.logFormatArgs...)
 	args = append(args, podInfo.logLevelArgs...)

-	userID := int64(0)
-
 	affinityList := make([]*kube.LoadAffinity, 0)
 	if affinity != nil {
 		affinityList = append(affinityList, affinity)
 	}

+	var securityCtx *corev1.PodSecurityContext
+	nodeSelector := map[string]string{}
+	podOS := corev1.PodOS{}
+	if nodeOS == kube.NodeOSWindows {
+		userID := "ContainerAdministrator"
+		securityCtx = &corev1.PodSecurityContext{
+			WindowsOptions: &corev1.WindowsSecurityContextOptions{
+				RunAsUserName: &userID,
+			},
+		}
+
+		nodeSelector[kube.NodeOSLabel] = kube.NodeOSWindows
+		podOS.Name = kube.NodeOSWindows
+	} else {
+		userID := int64(0)
+		securityCtx = &corev1.PodSecurityContext{
+			RunAsUser: &userID,
+		}
+
+		if spcNoRelabeling {
+			securityCtx.SELinuxOptions = &corev1.SELinuxOptions{
+				Type: "spc_t",
+			}
+		}
+
+		nodeSelector[kube.NodeOSLabel] = kube.NodeOSLinux
+		podOS.Name = kube.NodeOSLinux
+	}
+
 	pod := &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: podName,
@@ -602,7 +634,9 @@ func (e *csiSnapshotExposer) createBackupPod(
 					},
 				},
 			},
-			Affinity: kube.ToSystemAffinity(affinityList),
+			NodeSelector: nodeSelector,
+			OS: &podOS,
+			Affinity: kube.ToSystemAffinity(affinityList),
 			Containers: []corev1.Container{
 				{
 					Name: containerName,
@@ -625,17 +659,9 @@ func (e *csiSnapshotExposer) createBackupPod(
 			TerminationGracePeriodSeconds: &gracePeriod,
 			Volumes: volumes,
 			RestartPolicy: corev1.RestartPolicyNever,
-			SecurityContext: &corev1.PodSecurityContext{
-				RunAsUser: &userID,
-			},
+			SecurityContext: securityCtx,
 		},
 	}
-
-	if spcNoRelabeling {
-		pod.Spec.SecurityContext.SELinuxOptions = &corev1.SELinuxOptions{
-			Type: "spc_t",
-		}
-	}
-
 	return e.kubeClient.CoreV1().Pods(ownerObject.Namespace).Create(ctx, pod, metav1.CreateOptions{})
 }
diff --git a/pkg/exposer/generic_restore.go b/pkg/exposer/generic_restore.go
index 7a7df90385..b85775389e 100644
--- a/pkg/exposer/generic_restore.go
+++ b/pkg/exposer/generic_restore.go
@@ -354,7 +354,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
 	containerName := string(ownerObject.UID)
 	volumeName := string(ownerObject.UID)

-	podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace)
+	podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace, kube.NodeOSLinux)
 	if err != nil {
 		return nil, errors.Wrap(err, "error to get inherited pod info from node-agent")
 	}
diff --git a/pkg/exposer/image.go b/pkg/exposer/image.go
index daf6c1adc0..da399cce5c 100644
--- a/pkg/exposer/image.go
+++ b/pkg/exposer/image.go
@@ -38,10 +38,10 @@ type inheritedPodInfo struct {
 	logFormatArgs []string
 }

-func getInheritedPodInfo(ctx context.Context, client kubernetes.Interface, veleroNamespace string) (inheritedPodInfo, error) {
+func getInheritedPodInfo(ctx context.Context, client kubernetes.Interface, veleroNamespace string, osType string) (inheritedPodInfo, error) {
 	podInfo := inheritedPodInfo{}

-	podSpec, err := nodeagent.GetPodSpec(ctx, client, veleroNamespace)
+	podSpec, err := nodeagent.GetPodSpec(ctx, client, veleroNamespace, osType)
 	if err != nil {
 		return podInfo, errors.Wrap(err, "error to get node-agent pod template")
 	}
diff --git a/pkg/exposer/image_test.go b/pkg/exposer/image_test.go
index 1a2e038f0d..18626174b0 100644
--- a/pkg/exposer/image_test.go
+++ b/pkg/exposer/image_test.go
@@ -26,6 +26,8 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"

+	"github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/pkg/util/kube" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/fake" @@ -322,7 +324,7 @@ func TestGetInheritedPodInfo(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) - info, err := getInheritedPodInfo(context.Background(), fakeKubeClient, test.namespace) + info, err := getInheritedPodInfo(context.Background(), fakeKubeClient, test.namespace, kube.NodeOSLinux) if test.expectErr == "" { assert.NoError(t, err) diff --git a/pkg/nodeagent/node_agent.go b/pkg/nodeagent/node_agent.go index 898ea1e018..b31b1dda5f 100644 --- a/pkg/nodeagent/node_agent.go +++ b/pkg/nodeagent/node_agent.go @@ -33,9 +33,12 @@ import ( ) const ( - // daemonSet is the name of the Velero node agent daemonset. + // daemonSet is the name of the Velero node agent daemonset on linux nodes. daemonSet = "node-agent" + // daemonsetWindows is the name of the Velero node agent daemonset on Windows nodes. + daemonsetWindows = "node-agent-windows" + // nodeAgentRole marks pods with node-agent role on all nodes. nodeAgentRole = "node-agent" ) @@ -92,9 +95,16 @@ type Configs struct { PodResources *kube.PodResources `json:"podResources,omitempty"` } -// IsRunning checks if the node agent daemonset is running properly. If not, return the error found -func IsRunning(ctx context.Context, kubeClient kubernetes.Interface, namespace string) error { - if _, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonSet, metav1.GetOptions{}); apierrors.IsNotFound(err) { +func IsRunningOnLinux(ctx context.Context, kubeClient kubernetes.Interface, namespace string) error { + return isRunning(ctx, kubeClient, namespace, daemonSet) +} + +func IsRunningOnWindows(ctx context.Context, kubeClient kubernetes.Interface, namespace string) error { + return isRunning(ctx, kubeClient, namespace, daemonsetWindows) +} + +func isRunning(ctx context.Context, kubeClient kubernetes.Interface, namespace string, daemonset string) error { + if _, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonset, metav1.GetOptions{}); apierrors.IsNotFound(err) { return ErrDaemonSetNotFound } else if err != nil { return err @@ -147,10 +157,15 @@ func isRunningInNode(ctx context.Context, namespace string, nodeName string, crC return errors.Errorf("daemonset pod not found in running state in node %s", nodeName) } -func GetPodSpec(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (*v1.PodSpec, error) { - ds, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonSet, metav1.GetOptions{}) +func GetPodSpec(ctx context.Context, kubeClient kubernetes.Interface, namespace string, osType string) (*v1.PodSpec, error) { + dsName := daemonSet + if osType == kube.NodeOSWindows { + dsName = daemonsetWindows + } + + ds, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, dsName, metav1.GetOptions{}) if err != nil { - return nil, errors.Wrap(err, "error to get node-agent daemonset") + return nil, errors.Wrapf(err, "error to get %s daemonset", dsName) } return &ds.Spec.Template.Spec, nil @@ -180,10 +195,15 @@ func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Int return configs, nil } -func GetLabelValue(ctx context.Context, kubeClient kubernetes.Interface, namespace string, key string) (string, error) { - ds, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonSet, metav1.GetOptions{}) +func GetLabelValue(ctx context.Context, kubeClient 
+	dsName := daemonSet
+	if osType == kube.NodeOSWindows {
+		dsName = daemonsetWindows
+	}
+
+	ds, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, dsName, metav1.GetOptions{})
 	if err != nil {
-		return "", errors.Wrap(err, "error getting node-agent daemonset")
+		return "", errors.Wrapf(err, "error getting %s daemonset", dsName)
 	}

 	if ds.Spec.Template.Labels == nil {
diff --git a/pkg/nodeagent/node_agent_test.go b/pkg/nodeagent/node_agent_test.go
index 700acdec8f..a153e1e8c2 100644
--- a/pkg/nodeagent/node_agent_test.go
+++ b/pkg/nodeagent/node_agent_test.go
@@ -31,6 +31,7 @@ import (
 	clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"

 	"github.com/vmware-tanzu/velero/pkg/builder"
+	"github.com/vmware-tanzu/velero/pkg/util/kube"
 )

 type reactor struct {
@@ -40,7 +41,7 @@ type reactor struct {
 }

 func TestIsRunning(t *testing.T) {
-	daemonSet := &appsv1.DaemonSet{
+	ds := &appsv1.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "fake-ns",
 			Name: "node-agent",
@@ -80,7 +81,7 @@ func TestIsRunning(t *testing.T) {
 			name: "succeed",
 			namespace: "fake-ns",
 			kubeClientObj: []runtime.Object{
-				daemonSet,
+				ds,
 			},
 		},
 	}
@@ -93,7 +94,7 @@ func TestIsRunning(t *testing.T) {
 				fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc)
 			}

-			err := IsRunning(context.TODO(), fakeKubeClient, test.namespace)
+			err := isRunning(context.TODO(), fakeKubeClient, test.namespace, daemonSet)
 			if test.expectErr == "" {
 				assert.NoError(t, err)
 			} else {
@@ -229,7 +230,7 @@ func TestGetPodSpec(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)

-			spec, err := GetPodSpec(context.TODO(), fakeKubeClient, test.namespace)
+			spec, err := GetPodSpec(context.TODO(), fakeKubeClient, test.namespace, kube.NodeOSLinux)
 			if test.expectErr == "" {
 				assert.NoError(t, err)
 				assert.Equal(t, *spec, test.expectSpec)
@@ -450,7 +451,7 @@ func TestGetLabelValue(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)

-			value, err := GetLabelValue(context.TODO(), fakeKubeClient, test.namespace, "fake-label")
+			value, err := GetLabelValue(context.TODO(), fakeKubeClient, test.namespace, "fake-label", kube.NodeOSLinux)
 			if test.expectErr == "" {
 				assert.NoError(t, err)
 				assert.Equal(t, test.expectedValue, value)
diff --git a/pkg/podvolume/backupper.go b/pkg/podvolume/backupper.go
index 0a0c63eff1..29452344e2 100644
--- a/pkg/podvolume/backupper.go
+++ b/pkg/podvolume/backupper.go
@@ -206,6 +206,12 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
 		return nil, pvcSummary, nil
 	}

+	if err := kube.IsLinuxNode(b.ctx, pod.Spec.NodeName, b.crClient); err != nil {
+		err := errors.Wrapf(err, "Pod %s/%s is not running in linux node(%s), skip", pod.Namespace, pod.Name, pod.Spec.NodeName)
+		skipAllPodVolumes(pod, volumesToBackup, err, pvcSummary, log)
+		return nil, pvcSummary, []error{err}
+	}
+
 	err := nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.crClient)
 	if err != nil {
 		skipAllPodVolumes(pod, volumesToBackup, err, pvcSummary, log)
diff --git a/pkg/podvolume/backupper_test.go b/pkg/podvolume/backupper_test.go
index c21ad5ebb9..9414368307 100644
--- a/pkg/podvolume/backupper_test.go
+++ b/pkg/podvolume/backupper_test.go
@@ -303,6 +303,14 @@ func createPVBObj(fail bool, withSnapshot bool, index int, uploaderType string)
 	return pvbObj
 }

+func createNodeObj() *corev1api.Node {
+	return builder.ForNode("fake-node-name").Labels(map[string]string{"kubernetes.io/os": "linux"}).Result()
+}
+
+func createWindowsNodeObj() *corev1api.Node {
+	return builder.ForNode("fake-node-name").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
+}
+
 func TestBackupPodVolumes(t *testing.T) {
 	scheme := runtime.NewScheme()
 	velerov1api.AddToScheme(scheme)
@@ -358,13 +366,32 @@ func TestBackupPodVolumes(t *testing.T) {
 			uploaderType: "kopia",
 			bsl: "fake-bsl",
 		},
+		{
+			name: "pod is not running on Linux node",
+			volumes: []string{
+				"fake-volume-1",
+				"fake-volume-2",
+			},
+			kubeClientObj: []runtime.Object{
+				createNodeAgentPodObj(true),
+				createWindowsNodeObj(),
+			},
+			sourcePod: createPodObj(false, false, false, 2),
+			uploaderType: "kopia",
+			errs: []string{
+				"Pod fake-ns/fake-pod is not running in linux node(fake-node-name), skip",
+			},
+		},
 		{
 			name: "node-agent pod is not running in node",
 			volumes: []string{
 				"fake-volume-1",
 				"fake-volume-2",
 			},
-			sourcePod: createPodObj(true, false, false, 2),
+			sourcePod: createPodObj(true, false, false, 2),
+			kubeClientObj: []runtime.Object{
+				createNodeObj(),
+			},
 			uploaderType: "kopia",
 			errs: []string{
 				"daemonset pod not found in running state in node fake-node-name",
@@ -379,6 +406,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			sourcePod: createPodObj(true, false, false, 2),
 			kubeClientObj: []runtime.Object{
 				createNodeAgentPodObj(true),
+				createNodeObj(),
 			},
 			uploaderType: "kopia",
 			mockGetRepositoryType: true,
@@ -395,6 +423,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			sourcePod: createPodObj(true, false, false, 2),
 			kubeClientObj: []runtime.Object{
 				createNodeAgentPodObj(true),
+				createNodeObj(),
 			},
 			uploaderType: "kopia",
 			errs: []string{
@@ -410,6 +439,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			sourcePod: createPodObj(true, false, false, 2),
 			kubeClientObj: []runtime.Object{
 				createNodeAgentPodObj(true),
+				createNodeObj(),
 			},
 			ctlClientObj: []runtime.Object{
 				createBackupRepoObj(),
@@ -427,6 +457,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			sourcePod: createPodObj(true, true, false, 2),
 			kubeClientObj: []runtime.Object{
 				createNodeAgentPodObj(true),
+				createNodeObj(),
 			},
 			ctlClientObj: []runtime.Object{
 				createBackupRepoObj(),
@@ -448,6 +479,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			sourcePod: createPodObj(true, true, false, 2),
 			kubeClientObj: []runtime.Object{
 				createNodeAgentPodObj(true),
+				createNodeObj(),
 				createPVCObj(1),
 				createPVCObj(2),
 			},
@@ -471,6 +503,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			sourcePod: createPodObj(true, true, false, 2),
 			kubeClientObj: []runtime.Object{
 				createNodeAgentPodObj(true),
+				createNodeObj(),
 				createPVCObj(1),
 				createPVCObj(2),
 				createPVObj(1, true),
@@ -482,6 +515,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			runtimeScheme: scheme,
 			uploaderType: "kopia",
 			bsl: "fake-bsl",
+			errs: []string{},
 		},
 		{
 			name: "volume not mounted by pod should be skipped",
@@ -492,6 +526,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			sourcePod: createPodObj(true, true, false, 2),
 			kubeClientObj: []runtime.Object{
 				createNodeAgentPodObj(true),
+				createNodeObj(),
 				createPVCObj(1),
 				createPVCObj(2),
 				createPVObj(1, false),
@@ -503,6 +538,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			runtimeScheme: scheme,
 			uploaderType: "kopia",
 			bsl: "fake-bsl",
+			errs: []string{},
 		},
 		{
 			name: "return completed pvbs",
@@ -512,6 +548,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			sourcePod: createPodObj(true, true, true, 1),
 			kubeClientObj: []runtime.Object{
 				createNodeAgentPodObj(true),
+				createNodeObj(),
 				createPVCObj(1),
 				createPVObj(1, false),
 			},
@@ -522,6 +559,7 @@ func TestBackupPodVolumes(t *testing.T) {
 			uploaderType: "kopia",
 			bsl: "fake-bsl",
 			pvbs: 1,
+			errs: []string{},
 		},
 	}
 	// TODO add more verification around PVCBackupSummary returned by "BackupPodVolumes"
@@ -568,8 +606,8 @@ func TestBackupPodVolumes(t *testing.T) {

 			pvbs, _, errs := bp.BackupPodVolumes(backupObj, test.sourcePod, test.volumes, nil, velerotest.NewLogger())

-			if errs == nil {
-				assert.Nil(t, test.errs)
+			if test.errs == nil {
+				assert.NoError(t, err)
 			} else {
 				for i := 0; i < len(errs); i++ {
 					assert.EqualError(t, errs[i], test.errs[i])
diff --git a/pkg/podvolume/restorer.go b/pkg/podvolume/restorer.go
index 4b3e4354dd..18e7717631 100644
--- a/pkg/podvolume/restorer.go
+++ b/pkg/podvolume/restorer.go
@@ -122,7 +122,7 @@ func (r *restorer) RestorePodVolumes(data RestoreData, tracker *volume.RestoreVo
 		return nil
 	}

-	if err := nodeagent.IsRunning(r.ctx, r.kubeClient, data.Restore.Namespace); err != nil {
+	if err := nodeagent.IsRunningOnLinux(r.ctx, r.kubeClient, data.Restore.Namespace); err != nil {
 		return []error{errors.Wrapf(err, "error to check node agent status")}
 	}

@@ -213,6 +213,12 @@ func (r *restorer) RestorePodVolumes(data RestoreData, tracker *volume.RestoreVo
 			} else if err != nil {
 				r.log.WithError(err).Error("Failed to check node-agent pod status, disengage")
 			} else {
+				if err := kube.IsLinuxNode(checkCtx, nodeName, r.crClient); err != nil {
+					r.log.WithField("node", nodeName).WithError(err).Error("Restored pod is not running in linux node")
+					r.nodeAgentCheck <- errors.Wrapf(err, "restored pod %s/%s is not running in linux node(%s)", data.Pod.Namespace, data.Pod.Name, nodeName)
+					return
+				}
+
 				err = nodeagent.IsRunningInNode(checkCtx, data.Restore.Namespace, nodeName, r.crClient)
 				if err != nil {
 					r.log.WithField("node", nodeName).WithError(err).Error("node-agent pod is not running in node, abort the restore")
diff --git a/pkg/podvolume/restorer_test.go b/pkg/podvolume/restorer_test.go
index 5d52cf21d2..1af4da4294 100644
--- a/pkg/podvolume/restorer_test.go
+++ b/pkg/podvolume/restorer_test.go
@@ -33,7 +33,6 @@ import (
 	"k8s.io/client-go/kubernetes"
 	kubefake "k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/cache"
-	ctrlfake "sigs.k8s.io/controller-runtime/pkg/client/fake"

 	"github.com/vmware-tanzu/velero/internal/volume"
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@@ -314,6 +313,30 @@ func TestRestorePodVolumes(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "pod is not running on linux nodes",
+			pvbs: []*velerov1api.PodVolumeBackup{
+				createPVBObj(true, true, 1, "kopia"),
+			},
+			kubeClientObj: []runtime.Object{
+				createNodeAgentDaemonset(),
+				createWindowsNodeObj(),
+				createPVCObj(1),
+				createPodObj(true, true, true, 1),
+			},
+			ctlClientObj: []runtime.Object{
+				createBackupRepoObj(),
+			},
+			restoredPod: createPodObj(true, true, true, 1),
+			sourceNamespace: "fake-ns",
+			bsl: "fake-bsl",
+			runtimeScheme: scheme,
+			errs: []expectError{
+				{
+					err: "restored pod fake-ns/fake-pod is not running in linux node(fake-node-name): os type windows for node fake-node-name is not linux",
+				},
+			},
+		},
 		{
 			name: "node-agent pod is not running",
 			pvbs: []*velerov1api.PodVolumeBackup{
@@ -321,6 +344,7 @@ func TestRestorePodVolumes(t *testing.T) {
 			},
 			kubeClientObj: []runtime.Object{
 				createNodeAgentDaemonset(),
+				createNodeObj(),
 				createPVCObj(1),
 				createPodObj(true, true, true, 1),
 			},
@@ -344,6 +368,7 @@ func TestRestorePodVolumes(t *testing.T) {
 			},
 			kubeClientObj: []runtime.Object{
 				createNodeAgentDaemonset(),
+				createNodeObj(),
 				createPVCObj(1),
 				createPodObj(true, true, true, 1),
 				createNodeAgentPodObj(true),
@@ -368,11 +393,6 @@ func TestRestorePodVolumes(t *testing.T) {
 				ctx = test.ctx
 			}

-			fakeClientBuilder := ctrlfake.NewClientBuilder()
-			if test.runtimeScheme != nil {
-				fakeClientBuilder = fakeClientBuilder.WithScheme(test.runtimeScheme)
-			}
-
 			objClient := append(test.ctlClientObj, test.kubeClientObj...)
 			objClient = append(objClient, test.veleroClientObj...)
@@ -438,7 +458,8 @@ func TestRestorePodVolumes(t *testing.T) {
 			for i := 0; i < len(errs); i++ {
 				j := 0
 				for ; j < len(test.errs); j++ {
-					if errs[i].Error() == test.errs[j].err {
+					err := errs[i].Error()
+					if err == test.errs[j].err {
 						break
 					}
 				}
diff --git a/pkg/restore/actions/pod_volume_restore_action.go b/pkg/restore/actions/pod_volume_restore_action.go
index 71af336ceb..1970f54fa1 100644
--- a/pkg/restore/actions/pod_volume_restore_action.go
+++ b/pkg/restore/actions/pod_volume_restore_action.go
@@ -25,9 +25,11 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

@@ -40,6 +42,7 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/podvolume"
 	"github.com/vmware-tanzu/velero/pkg/restorehelper"
 	"github.com/vmware-tanzu/velero/pkg/util/kube"
+	veleroutil "github.com/vmware-tanzu/velero/pkg/util/velero"
 )

 const (
@@ -50,17 +53,24 @@ const (
 )

 type PodVolumeRestoreAction struct {
-	logger   logrus.FieldLogger
-	client   corev1client.ConfigMapInterface
-	crClient ctrlclient.Client
+	logger      logrus.FieldLogger
+	client      corev1client.ConfigMapInterface
+	crClient    ctrlclient.Client
+	veleroImage string
 }

-func NewPodVolumeRestoreAction(logger logrus.FieldLogger, client corev1client.ConfigMapInterface, crClient ctrlclient.Client) *PodVolumeRestoreAction {
-	return &PodVolumeRestoreAction{
-		logger:   logger,
-		client:   client,
-		crClient: crClient,
+func NewPodVolumeRestoreAction(logger logrus.FieldLogger, client corev1client.ConfigMapInterface, crClient ctrlclient.Client, namespace string) (*PodVolumeRestoreAction, error) {
+	deployment := &appsv1.Deployment{}
+	if err := crClient.Get(context.TODO(), types.NamespacedName{Name: "velero", Namespace: namespace}, deployment); err != nil {
+		return nil, err
 	}
+
+	image := veleroutil.GetVeleroServerImage(deployment)
+	return &PodVolumeRestoreAction{
+		logger:      logger,
+		client:      client,
+		crClient:    crClient,
+		veleroImage: image,
+	}, nil
 }

 func (a *PodVolumeRestoreAction) AppliesTo() (velero.ResourceSelector, error) {
@@ -117,7 +127,7 @@ func (a *PodVolumeRestoreAction) Execute(input *velero.RestoreItemActionExecuteI
 		return nil, err
 	}

-	image := getImage(log, config)
+	image := getImage(log, config, a.veleroImage)
 	log.Infof("Using image %q", image)

 	cpuRequest, memRequest := getResourceRequests(log, config)
@@ -200,16 +210,16 @@ func getCommand(log logrus.FieldLogger, config *corev1.ConfigMap) []string {
 	return []string{config.Data["command"]}
 }

-func getImage(log logrus.FieldLogger, config *corev1.ConfigMap) string {
+func getImage(log logrus.FieldLogger, config *corev1.ConfigMap, defaultImage string) string {
 	if config == nil {
 		log.Debug("No config found for plugin")
-		return veleroimage.DefaultRestoreHelperImage()
+		return defaultImage
 	}

 	image := config.Data["image"]
 	if image == "" {
 		log.Debugf("No custom image configured")
-		return veleroimage.DefaultRestoreHelperImage()
+		return defaultImage
 	}

 	log = log.WithField("image", image)
@@ -217,7 +227,6 @@ func getImage(log logrus.FieldLogger, config *corev1.ConfigMap) string {
 	parts := strings.Split(image, "/")

 	if len(parts) == 1 {
-		defaultImage := veleroimage.DefaultRestoreHelperImage()
 		// Image supplied without registry part
 		log.Infof("Plugin config contains image name without registry name. Using default init container image: %q", defaultImage)
 		return defaultImage
diff --git a/pkg/restore/actions/pod_volume_restore_action_test.go b/pkg/restore/actions/pod_volume_restore_action_test.go
index 5b836cba15..4327214e64 100644
--- a/pkg/restore/actions/pod_volume_restore_action_test.go
+++ b/pkg/restore/actions/pod_volume_restore_action_test.go
@@ -25,12 +25,13 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	appsv1 "k8s.io/api/apps/v1"
 	corev1api "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"

-	veleroimage "github.com/vmware-tanzu/velero/internal/velero"
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/builder"
 	"github.com/vmware-tanzu/velero/pkg/buildinfo"
@@ -48,7 +49,7 @@ func TestGetImage(t *testing.T) {
 		}
 	}

-	defaultImage := veleroimage.DefaultRestoreHelperImage()
+	defaultImage := "velero/velero:v1.0"

 	tests := []struct {
 		name string
@@ -104,7 +105,7 @@ func TestGetImage(t *testing.T) {
 					buildinfo.Version = originalVersion
 				}()
 			}
-			assert.Equal(t, test.want, getImage(velerotest.NewLogger(), test.configMap))
+			assert.Equal(t, test.want, getImage(velerotest.NewLogger(), test.configMap, defaultImage))
 		})
 	}
 }
@@ -134,7 +135,7 @@ func TestPodVolumeRestoreActionExecute(t *testing.T) {
 		veleroNs = "velero"
 	)

-	defaultRestoreHelperImage := veleroimage.DefaultRestoreHelperImage()
+	defaultRestoreHelperImage := "velero/velero:v1.0"

 	tests := []struct {
 		name string
@@ -265,10 +266,34 @@ func TestPodVolumeRestoreActionExecute(t *testing.T) {
 		},
 	}

+	veleroDeployment := &appsv1.Deployment{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: appsv1.SchemeGroupVersion.String(),
+			Kind:       "Deployment",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "velero",
+			Name:      "velero",
+		},
+		Spec: appsv1.DeploymentSpec{
+			Template: corev1api.PodTemplateSpec{
+				Spec: corev1api.PodSpec{
+					Containers: []corev1api.Container{
+						{
+							Image: "velero/velero:v1.0",
+						},
+					},
+				},
+			},
+		},
+	}
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			clientset := fake.NewSimpleClientset()
-			crClient := velerotest.NewFakeControllerRuntimeClient(t, tc.podVolumeBackups...)
+
+			objects := []runtime.Object{veleroDeployment}
+			objects = append(objects, tc.podVolumeBackups...)
+			crClient := velerotest.NewFakeControllerRuntimeClient(t, objects...)

 			unstructuredPod, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pod)
 			require.NoError(t, err)
@@ -295,11 +320,13 @@ func TestPodVolumeRestoreActionExecute(t *testing.T) {
 					Result(),
 			}

-			a := NewPodVolumeRestoreAction(
+			a, err := NewPodVolumeRestoreAction(
 				logrus.StandardLogger(),
 				clientset.CoreV1().ConfigMaps(veleroNs),
 				crClient,
+				"velero",
 			)
+			require.NoError(t, err)

 			// method under test
 			res, err := a.Execute(input)
diff --git a/pkg/uploader/kopia/progress.go b/pkg/uploader/kopia/progress.go
index 7f0619f574..9f2498379c 100644
--- a/pkg/uploader/kopia/progress.go
+++ b/pkg/uploader/kopia/progress.go
@@ -138,7 +138,9 @@ func (p *Progress) HashingFile(fname string) {}
 func (p *Progress) ExcludedFile(fname string, numBytes int64) {}

 // ExcludedDir statistic the dir been excluded currently
-func (p *Progress) ExcludedDir(dirname string) {}
+func (p *Progress) ExcludedDir(dirname string) {
+	p.log.Infof("Excluded dir %s", dirname)
+}

 // FinishedHashingFile which will called when specific file finished hash
 func (p *Progress) FinishedHashingFile(fname string, numBytes int64) {
diff --git a/pkg/uploader/kopia/snapshot.go b/pkg/uploader/kopia/snapshot.go
index c80ab155f8..fce620eb73 100644
--- a/pkg/uploader/kopia/snapshot.go
+++ b/pkg/uploader/kopia/snapshot.go
@@ -127,6 +127,10 @@ func setupPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snap
 		curPolicy.UploadPolicy.ParallelUploadAboveSize = newOptionalInt64(2 << 30)
 	}

+	if runtime.GOOS == "windows" {
+		curPolicy.FilesPolicy.IgnoreRules = []string{"/System Volume Information/", "/$Recycle.Bin/"}
+	}
+
 	err := setPolicyFunc(ctx, rep, sourceInfo, curPolicy)
 	if err != nil {
 		return nil, errors.Wrap(err, "error to set policy")
diff --git a/pkg/util/kube/node.go b/pkg/util/kube/node.go
new file mode 100644
index 0000000000..da68183a55
--- /dev/null
+++ b/pkg/util/kube/node.go
@@ -0,0 +1,119 @@
+/*
+Copyright The Velero Contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package kube
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	corev1api "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const (
+	NodeOSLinux   = "linux"
+	NodeOSWindows = "windows"
+	NodeOSLabel   = "kubernetes.io/os"
+)
+
+func IsLinuxNode(ctx context.Context, nodeName string, client client.Client) error {
+	node := &corev1api.Node{}
+	if err := client.Get(ctx, types.NamespacedName{Name: nodeName}, node); err != nil {
+		return errors.Wrapf(err, "error getting node %s", nodeName)
+	}
+
+	os, found := node.Labels[NodeOSLabel]
+
+	if !found {
+		return errors.Errorf("no os type label for node %s", nodeName)
+	}
+
+	if os != NodeOSLinux {
+		return errors.Errorf("os type %s for node %s is not linux", os, nodeName)
+	}
+
+	return nil
+}
+
+func WithLinuxNode(ctx context.Context, client client.Client, log logrus.FieldLogger) bool {
+	return withOSNode(ctx, client, NodeOSLinux, log)
+}
+
+func WithWindowsNode(ctx context.Context, client client.Client, log logrus.FieldLogger) bool {
+	return withOSNode(ctx, client, NodeOSWindows, log)
+}
+
+func withOSNode(ctx context.Context, client client.Client, osType string, log logrus.FieldLogger) bool {
+	nodeList := new(corev1api.NodeList)
+	if err := client.List(ctx, nodeList); err != nil {
+		log.Warnf("Failed to list nodes, cannot decide existence of nodes of OS %s", osType)
+		return false
+	}
+
+	allNodeLabeled := true
+	for _, node := range nodeList.Items {
+		os, found := node.Labels[NodeOSLabel]
+
+		if os == osType {
+			return true
+		}
+
+		if !found {
+			allNodeLabeled = false
+		}
+	}
+
+	if !allNodeLabeled {
+		log.Warnf("Not all nodes have os type label, cannot decide existence of nodes of OS %s", osType)
+	}
+
+	return false
+}
+
+func GetNodeOS(ctx context.Context, nodeName string, nodeClient corev1client.CoreV1Interface) (string, error) {
+	node, err := nodeClient.Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+	if err != nil {
+		return "", errors.Wrapf(err, "error getting node %s", nodeName)
+	}
+
+	if node.Labels == nil {
+		return "", nil
+	}
+
+	return node.Labels[NodeOSLabel], nil
+}
+
+func HasNodeWithOS(ctx context.Context, os string, nodeClient corev1client.CoreV1Interface) error {
+	if os == "" {
+		return errors.New("invalid node OS")
+	}
+
+	nodes, err := nodeClient.Nodes().List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", NodeOSLabel, os)})
+	if err != nil {
+		return errors.Wrapf(err, "error listing nodes with OS %s", os)
+	}
+
+	if len(nodes.Items) == 0 {
+		return errors.Errorf("node with OS %s doesn't exist", os)
+	}
+
+	return nil
+}
diff --git a/pkg/util/kube/node_test.go b/pkg/util/kube/node_test.go
new file mode 100644
index 0000000000..a26285f5ff
--- /dev/null
+++ b/pkg/util/kube/node_test.go
@@ -0,0 +1,261 @@
+/*
+Copyright The Velero Contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+	"context"
+	"testing"
+
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/vmware-tanzu/velero/pkg/builder"
+
+	kubeClientFake "k8s.io/client-go/kubernetes/fake"
+	clientTesting "k8s.io/client-go/testing"
+	clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	velerotest "github.com/vmware-tanzu/velero/pkg/test"
+)
+
+func TestIsLinuxNode(t *testing.T) {
+	nodeNoOSLabel := builder.ForNode("fake-node").Result()
+	nodeWindows := builder.ForNode("fake-node").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
+	nodeLinux := builder.ForNode("fake-node").Labels(map[string]string{"kubernetes.io/os": "linux"}).Result()
+
+	scheme := runtime.NewScheme()
+	corev1.AddToScheme(scheme)
+
+	tests := []struct {
+		name          string
+		kubeClientObj []runtime.Object
+		err           string
+	}{
+		{
+			name: "error getting node",
+			err:  "error getting node fake-node: nodes \"fake-node\" not found",
+		},
+		{
+			name: "no os label",
+			kubeClientObj: []runtime.Object{
+				nodeNoOSLabel,
+			},
+			err: "no os type label for node fake-node",
+		},
+		{
+			name: "os label does not match",
+			kubeClientObj: []runtime.Object{
+				nodeWindows,
+			},
+			err: "os type windows for node fake-node is not linux",
+		},
+		{
+			name: "succeed",
+			kubeClientObj: []runtime.Object{
+				nodeLinux,
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			fakeClientBuilder := clientFake.NewClientBuilder()
+			fakeClientBuilder = fakeClientBuilder.WithScheme(scheme)
+
+			fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build()
+
+			err := IsLinuxNode(context.TODO(), "fake-node", fakeClient)
+			if err != nil {
+				assert.EqualError(t, err, test.err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+func TestWithLinuxNode(t *testing.T) {
+	nodeWindows := builder.ForNode("fake-node-1").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
+	nodeLinux := builder.ForNode("fake-node-2").Labels(map[string]string{"kubernetes.io/os": "linux"}).Result()
+
+	scheme := runtime.NewScheme()
+	corev1.AddToScheme(scheme)
+
+	tests := []struct {
+		name          string
+		kubeClientObj []runtime.Object
+		result        bool
+	}{
+		{
+			name: "error listing node",
+		},
+		{
+			name: "with node of other type",
+			kubeClientObj: []runtime.Object{
+				nodeWindows,
+			},
+		},
+		{
+			name: "with node of the same type",
+			kubeClientObj: []runtime.Object{
+				nodeWindows,
+				nodeLinux,
+			},
+			result: true,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			fakeClientBuilder := clientFake.NewClientBuilder()
+			fakeClientBuilder = fakeClientBuilder.WithScheme(scheme)
+
+			fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build()
+
+			result := withOSNode(context.TODO(), fakeClient, "linux", velerotest.NewLogger())
+			assert.Equal(t, test.result, result)
+		})
+	}
+}
+
+func TestGetNodeOSType(t *testing.T) {
+	nodeNoOSLabel := builder.ForNode("fake-node").Result()
+	nodeWindows := builder.ForNode("fake-node").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
+	nodeLinux := builder.ForNode("fake-node").Labels(map[string]string{"kubernetes.io/os": "linux"}).Result()
+
+	scheme := runtime.NewScheme()
+	corev1.AddToScheme(scheme)
+
+	tests := []struct {
+		name           string
+		kubeClientObj  []runtime.Object
+		err            string
+		expectedOSType string
+	}{
+		{
+			name: "error getting node",
+			err:  "error getting node fake-node: nodes \"fake-node\" not found",
+		},
+		{
+			name: "no os label",
+			kubeClientObj: []runtime.Object{
+				nodeNoOSLabel,
+			},
+		},
+		{
+			name: "windows node",
+			kubeClientObj: []runtime.Object{
+				nodeWindows,
+			},
+			expectedOSType: "windows",
+		},
+		{
+			name: "linux node",
+			kubeClientObj: []runtime.Object{
+				nodeLinux,
+			},
+			expectedOSType: "linux",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			fakeKubeClient := kubeClientFake.NewSimpleClientset(test.kubeClientObj...)
+
+			osType, err := GetNodeOS(context.TODO(), "fake-node", fakeKubeClient.CoreV1())
+			if err != nil {
+				assert.EqualError(t, err, test.err)
+			} else {
+				assert.Equal(t, test.expectedOSType, osType)
+			}
+		})
+	}
+}
+
+func TestHasNodeWithOS(t *testing.T) {
+	nodeNoOSLabel := builder.ForNode("fake-node-1").Result()
+	nodeWindows := builder.ForNode("fake-node-2").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
+	nodeLinux := builder.ForNode("fake-node-3").Labels(map[string]string{"kubernetes.io/os": "linux"}).Result()
+
+	scheme := runtime.NewScheme()
+	corev1.AddToScheme(scheme)
+
+	tests := []struct {
+		name          string
+		kubeClientObj []runtime.Object
+		kubeReactors  []reactor
+		os            string
+		err           string
+	}{
+		{
+			name: "os is empty",
+			err:  "invalid node OS",
+		},
+		{
+			name: "error to list node",
+			kubeReactors: []reactor{
+				{
+					verb:     "list",
+					resource: "nodes",
+					reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) {
+						return true, nil, errors.New("fake-list-error")
+					},
+				},
+			},
+			os:  "linux",
+			err: "error listing nodes with OS linux: fake-list-error",
+		},
+		{
+			name: "no expected node - no node",
+			os:   "linux",
+			err:  "node with OS linux doesn't exist",
+		},
+		{
+			name: "no expected node - no node with label",
+			kubeClientObj: []runtime.Object{
+				nodeNoOSLabel,
+				nodeWindows,
+			},
+			os:  "linux",
+			err: "node with OS linux doesn't exist",
+		},
+		{
+			name: "succeed",
+			kubeClientObj: []runtime.Object{
+				nodeNoOSLabel,
+				nodeWindows,
+				nodeLinux,
+			},
+			os: "windows",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			fakeKubeClient := kubeClientFake.NewSimpleClientset(test.kubeClientObj...)
+
+			for _, reactor := range test.kubeReactors {
+				fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc)
+			}
+
+			err := HasNodeWithOS(context.TODO(), test.os, fakeKubeClient.CoreV1())
+			if test.err != "" {
+				assert.EqualError(t, err, test.err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
diff --git a/pkg/util/kube/pvc_pv.go b/pkg/util/kube/pvc_pv.go
index bf7779aaa7..e91e5dab3b 100644
--- a/pkg/util/kube/pvc_pv.go
+++ b/pkg/util/kube/pvc_pv.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"strings"
 	"time"
 
 	jsonpatch "github.com/evanphx/json-patch/v5"
@@ -427,3 +428,49 @@ func DiagnosePV(pv *corev1api.PersistentVolume) string {
 	diag := fmt.Sprintf("PV %s, phase %s, reason %s, message %s\n", pv.Name, pv.Status.Phase, pv.Status.Reason, pv.Status.Message)
 	return diag
 }
+
+// GetPVCAttachingNodeOS deduces the OS of the node that the given PVC will be
+// attached to, defaulting to linux when no reliable hint is available.
+func GetPVCAttachingNodeOS(pvc *corev1api.PersistentVolumeClaim, nodeClient corev1client.CoreV1Interface,
+	storageClient storagev1.StorageV1Interface, log logrus.FieldLogger) (string, error) {
+	var nodeOS string
+	var scFsType string
+
+	if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1api.PersistentVolumeBlock {
+		log.Infof("Use linux node for block mode PVC %s/%s", pvc.Namespace, pvc.Name)
+		return NodeOSLinux, nil
+	}
+
+	if value := pvc.Annotations[KubeAnnSelectedNode]; value != "" {
+		os, err := GetNodeOS(context.Background(), value, nodeClient)
+		if err != nil {
+			return "", errors.Wrapf(err, "error getting OS from node %s for PVC %s/%s", value, pvc.Namespace, pvc.Name)
+		}
+
+		nodeOS = os
+	}
+
+	if pvc.Spec.StorageClassName != nil {
+		sc, err := storageClient.StorageClasses().Get(context.Background(), *pvc.Spec.StorageClassName, metav1.GetOptions{})
+		if err != nil {
+			return "", errors.Wrapf(err, "error getting storage class %s", *pvc.Spec.StorageClassName)
+		}
+
+		if sc.Parameters != nil {
+			scFsType = strings.ToLower(sc.Parameters["csi.storage.k8s.io/fstype"])
+		}
+	}
+
+	if nodeOS != "" {
+		log.Infof("Deduced node OS %s from selected node for PVC %s/%s (fsType %s)", nodeOS, pvc.Namespace, pvc.Name, scFsType)
+		return nodeOS, nil
+	}
+
+	if scFsType == "ntfs" {
+		log.Infof("Deduced Windows node OS from fsType for PVC %s/%s", pvc.Namespace, pvc.Name)
+		return NodeOSWindows, nil
+	}
+
+	log.Warnf("Cannot deduce node OS for PVC %s/%s, default to linux", pvc.Namespace, pvc.Name)
+	return NodeOSLinux, nil
+}
diff --git a/pkg/util/kube/pvc_pv_test.go b/pkg/util/kube/pvc_pv_test.go
index 52e01ee694..2a5c2d8266 100644
--- a/pkg/util/kube/pvc_pv_test.go
+++ b/pkg/util/kube/pvc_pv_test.go
@@ -33,6 +33,7 @@ import (
 
 	clientTesting "k8s.io/client-go/testing"
 
+	"github.com/vmware-tanzu/velero/pkg/builder"
 	velerotest "github.com/vmware-tanzu/velero/pkg/test"
 )
 
@@ -1550,3 +1551,149 @@ func TestDiagnosePV(t *testing.T) {
 		})
 	}
 }
+
+func TestGetPVCAttachingNodeOS(t *testing.T) {
+	storageClass := "fake-storage-class"
+	nodeNoOSLabel := builder.ForNode("fake-node").Result()
+	nodeWindows := builder.ForNode("fake-node").Labels(map[string]string{"kubernetes.io/os": "windows"}).Result()
+
+	pvcObj := &corev1api.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "fake-namespace",
+			Name:      "fake-pvc",
+		},
+	}
+
+	blockMode := corev1api.PersistentVolumeBlock
+	pvcObjBlockMode := &corev1api.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "fake-namespace",
+			Name:      "fake-pvc",
+		},
+		Spec: corev1api.PersistentVolumeClaimSpec{
+			VolumeMode: &blockMode,
+		},
+	}
+
+	pvcObjWithNode := &corev1api.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace:   "fake-namespace",
+			Name:        "fake-pvc",
+			Annotations: map[string]string{KubeAnnSelectedNode: "fake-node"},
+		},
+	}
+
+	pvcObjWithStorageClass := &corev1api.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "fake-namespace",
+			Name:      "fake-pvc",
+		},
+		Spec: corev1api.PersistentVolumeClaimSpec{
+			StorageClassName: &storageClass,
+		},
+	}
+
+	pvcObjWithBoth := &corev1api.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace:   "fake-namespace",
+			Name:        "fake-pvc",
+			Annotations: map[string]string{KubeAnnSelectedNode: "fake-node"},
+		},
+		Spec: corev1api.PersistentVolumeClaimSpec{
+			StorageClassName: &storageClass,
+		},
+	}
+
+	scObjWithoutFSType := &storagev1api.StorageClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "fake-storage-class",
+		},
+	}
+
+	scObjWithFSType := &storagev1api.StorageClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "fake-storage-class",
+		},
+		Parameters: map[string]string{"csi.storage.k8s.io/fstype": "ntfs"},
+	}
+
+	tests := []struct {
+		name           string
+		pvc            *corev1api.PersistentVolumeClaim
+		kubeClientObj  []runtime.Object
+		expectedNodeOS string
+		err            string
+	}{
+		{
+			name:           "no selected node and storage class",
+			pvc:            pvcObj,
+			expectedNodeOS: NodeOSLinux,
+		},
+		{
+			name: "node doesn't exist",
+			pvc:  pvcObjWithNode,
+			err:  "error getting OS from node fake-node for PVC fake-namespace/fake-pvc: error getting node fake-node: nodes \"fake-node\" not found",
+		},
+		{
+			name: "node without os label",
+			pvc:  pvcObjWithNode,
+			kubeClientObj: []runtime.Object{
+				nodeNoOSLabel,
+			},
+			expectedNodeOS: NodeOSLinux,
+		},
+		{
+			name: "sc doesn't exist",
+			pvc:  pvcObjWithStorageClass,
+			err:  "error getting storage class fake-storage-class: storageclasses.storage.k8s.io \"fake-storage-class\" not found",
+		},
+		{
+			name: "sc without fsType",
+			pvc:  pvcObjWithStorageClass,
+			kubeClientObj: []runtime.Object{
+				scObjWithoutFSType,
+			},
+			expectedNodeOS: NodeOSLinux,
+		},
+		{
+			name: "deduce from node os",
+			pvc:  pvcObjWithBoth,
+			kubeClientObj: []runtime.Object{
+				nodeWindows,
+				scObjWithFSType,
+			},
+			expectedNodeOS: NodeOSWindows,
+		},
+		{
+			name: "deduce from sc",
+			pvc:  pvcObjWithBoth,
+			kubeClientObj: []runtime.Object{
+				nodeNoOSLabel,
+				scObjWithFSType,
+			},
+			expectedNodeOS: NodeOSWindows,
+		},
+		{
+			name:           "block mode",
+			pvc:            pvcObjBlockMode,
+			expectedNodeOS: NodeOSLinux,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
+
+			var kubeClient kubernetes.Interface = fakeKubeClient
+
+			nodeOS, err := GetPVCAttachingNodeOS(test.pvc, kubeClient.CoreV1(), kubeClient.StorageV1(), velerotest.NewLogger())
+
+			if err != nil {
+				assert.EqualError(t, err, test.err)
+			} else {
+				assert.NoError(t, err)
+			}
+
+			assert.Equal(t, test.expectedNodeOS, nodeOS)
+		})
+	}
+}
diff --git a/site/content/docs/main/file-system-backup.md b/site/content/docs/main/file-system-backup.md
index 71e3c2d9e1..a0f28f0dcf 100644
--- a/site/content/docs/main/file-system-backup.md
+++ b/site/content/docs/main/file-system-backup.md
@@ -356,8 +356,7 @@ with an infinite sleep) to mount these PVC/PV pairs prior taking a Velero backup
 
 ## Customize Restore Helper Container
 
-Velero uses a helper init container when performing a FSB restore. By default, the image for this container is
-`velero/velero-restore-helper:<VERSION>`, where `VERSION` matches the version/tag of the main Velero image.
+Velero uses a helper init container when performing a FSB restore. By default, the image for this container is the same as the Velero server image.
 You can customize the image that is used for this helper by creating a ConfigMap in the Velero namespace with the alternate image.
 
 In addition, you can customize the resource requirements for the init container, should you need.
diff --git a/test/e2e/migration/migration.go b/test/e2e/migration/migration.go
index 8999a805ff..7eb57daaec 100644
--- a/test/e2e/migration/migration.go
+++ b/test/e2e/migration/migration.go
@@ -67,8 +67,12 @@ func MigrationWithFS() {
 }
 
 func (m *migrationE2E) Init() error {
+	By("Call the base E2E init", func() {
+		Expect(m.TestCase.Init()).To(Succeed())
+	})
+
 	By("Skip check", func() {
-		if m.VeleroCfg.DefaultClusterContext == "" && m.VeleroCfg.StandbyClusterContext == "" {
+		if m.VeleroCfg.DefaultClusterContext == "" || m.VeleroCfg.StandbyClusterContext == "" {
 			Skip("Migration test needs 2 clusters")
 		}
 
@@ -81,10 +85,6 @@ func (m *migrationE2E) Init() error {
 		}
 	})
 
-	By("Call the base E2E init", func() {
-		Expect(m.TestCase.Init()).To(Succeed())
-	})
-
 	m.kibishiiData = *kibishii.DefaultKibishiiData
 	m.kibishiiData.ExpectedNodes = 3
 	m.CaseBaseName = "migration-" + m.UUIDgen
diff --git a/test/util/velero/install.go b/test/util/velero/install.go
index 25e38fbaf3..027ca43048 100644
--- a/test/util/velero/install.go
+++ b/test/util/velero/install.go
@@ -23,6 +23,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"strings"
 	"time"
 
 	"github.com/pkg/errors"
@@ -413,6 +414,15 @@ func createVeleroResources(ctx context.Context, cli, namespace string, args []st
 		return errors.Wrapf(err, "failed to run velero install dry run command, stdout=%s, stderr=%s", stdout, stderr)
 	}
 
+	// Since v1.15 the Restic uploader is deprecated, so the install CLI may
+	// print a deprecation warning ahead of the generated JSON.
+	// Strip everything before the first curly bracket so that only the JSON
+	// document is unmarshalled; if no curly bracket is found, leave stdout
+	// unchanged and let json.Unmarshal report the error.
+	if idx := strings.Index(stdout, "{"); idx > 0 {
+		stdout = stdout[idx:]
+	}
+
 	resources := &unstructured.UnstructuredList{}
 	if err := json.Unmarshal([]byte(stdout), resources); err != nil {
 		return errors.Wrapf(err, "failed to unmarshal the resources: %s", stdout)
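A minimal usage sketch of the node-OS helpers exercised by the new tests in `pkg/util/kube`, built on the client-go fake clientset; the node name `node-1` is illustrative, and the expected error text comes from the test expectations above:

```go
package main

import (
	"context"
	"fmt"

	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

func main() {
	// A cluster with a single Windows node, identified by the
	// well-known kubernetes.io/os label.
	node := &corev1api.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "node-1",
			Labels: map[string]string{"kubernetes.io/os": "windows"},
		},
	}
	client := fake.NewSimpleClientset(node)

	// Read the OS of a single node from its kubernetes.io/os label.
	osType, err := kube.GetNodeOS(context.Background(), "node-1", client.CoreV1())
	if err != nil {
		panic(err)
	}
	fmt.Println("node OS:", osType) // "windows"

	// Check whether any node of a given OS exists; an error means none does,
	// e.g. "node with OS linux doesn't exist" per the tests above.
	if err := kube.HasNodeWithOS(context.Background(), "linux", client.CoreV1()); err != nil {
		fmt.Println("fs-backup would be disabled:", err)
	}
}
```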
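To make the deduction order of the new `GetPVCAttachingNodeOS` helper concrete (block mode, then selected-node annotation, then StorageClass fstype, then the linux default), here is a self-contained sketch of the fstype path; the `ntfs-class` StorageClass and `demo/data` PVC names are illustrative, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
	corev1api "k8s.io/api/core/v1"
	storagev1api "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

func main() {
	// A StorageClass whose CSI fstype parameter is NTFS; the helper
	// lower-cases the value before comparing it to "ntfs".
	sc := &storagev1api.StorageClass{
		ObjectMeta: metav1.ObjectMeta{Name: "ntfs-class"},
		Parameters: map[string]string{"csi.storage.k8s.io/fstype": "NTFS"},
	}

	// A filesystem-mode PVC with no selected-node annotation, so the
	// StorageClass fstype is the only available hint.
	scName := "ntfs-class"
	pvc := &corev1api.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Namespace: "demo", Name: "data"},
		Spec:       corev1api.PersistentVolumeClaimSpec{StorageClassName: &scName},
	}

	client := fake.NewSimpleClientset(sc)

	nodeOS, err := kube.GetPVCAttachingNodeOS(pvc, client.CoreV1(), client.StorageV1(), logrus.New())
	if err != nil {
		panic(err)
	}

	// Expected: the windows node-OS value (NodeOSWindows), per the
	// "deduce from sc" case in TestGetPVCAttachingNodeOS.
	fmt.Println("deduced node OS:", nodeOS)
}
```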
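And a small standalone sketch of the prefix-stripping applied in `createVeleroResources`, showing why the guard matters; the warning text is made up for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Output of `velero install --dry-run -o json` may carry a deprecation
	// warning ahead of the JSON document.
	stdout := "Warning: the Restic uploader is deprecated\n{\"kind\":\"List\",\"apiVersion\":\"v1\"}"

	// Trim to the first curly bracket. idx > 0 also covers the cases where
	// stdout is empty or contains no JSON at all (idx == -1), avoiding the
	// out-of-range slice a bare stdout[idx:] would cause.
	if idx := strings.Index(stdout, "{"); idx > 0 {
		stdout = stdout[idx:]
	}

	fmt.Println(stdout) // {"kind":"List","apiVersion":"v1"}
}
```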