diff --git a/changelogs/CHANGELOG-1.11.md b/changelogs/CHANGELOG-1.11.md
index 9846b75603..75d08d4df0 100644
--- a/changelogs/CHANGELOG-1.11.md
+++ b/changelogs/CHANGELOG-1.11.md
@@ -100,7 +100,7 @@ To fix CVEs and keep pace with Golang, Velero made changes as follows:
 * Enable staticcheck linter. (#5788, @blackpiglet)
 * Set Kopia IgnoreUnknownTypes in ErrorHandlingPolicy to True for ignoring backup unknown file type (#5786, @qiuming-best)
 * Bump up Restic version to 0.15.0 (#5784, @qiuming-best)
-* Add File system backup related matrics to Grafana dashboard
+* Add File system backup related metrics to Grafana dashboard
 - Add metrics backup_warning_total for record of total warnings
 - Add metrics backup_last_status for record of last status of the backup (#5779, @allenxu404)
 * Design for Handling backup of volumes by resources filters (#5773, @qiuming-best)
diff --git a/changelogs/CHANGELOG-1.8.md b/changelogs/CHANGELOG-1.8.md
index 7c8f019467..e317849d1d 100644
--- a/changelogs/CHANGELOG-1.8.md
+++ b/changelogs/CHANGELOG-1.8.md
@@ -61,7 +61,7 @@ in progress for 1.9.
 * Add rbac and annotation test cases (#4455, @mqiu)
 * remove --crds-version in velero install command. (#4446, @jxun)
 * Upgrade e2e test vsphere plugin (#4440, @mqiu)
-* Fix e2e test failures for the inappropriate optimaze of velero install (#4438, @mqiu)
+* Fix e2e test failures for the inappropriate optimize of velero install (#4438, @mqiu)
 * Limit backup namespaces on test resource filtering cases (#4437, @mqiu)
 * Bump up Go to 1.17 (#4431, @reasonerjt)
 * Added ``-itemsnapshots.json.gz to the backup format. This file exists
diff --git a/design/Implemented/delete-item-action.md b/design/Implemented/delete-item-action.md
index 80baf685c4..799173ff39 100644
--- a/design/Implemented/delete-item-action.md
+++ b/design/Implemented/delete-item-action.md
@@ -175,7 +175,7 @@ If there are one or more, download the backup tarball from backup storage, untar
 
 ## Alternatives Considered
 
-Another proposal for higher level `DeleteItemActions` was initially included, which would require implementors to individually download the backup tarball themselves.
+Another proposal for higher level `DeleteItemActions` was initially included, which would require implementers to individually download the backup tarball themselves.
 While this may be useful long term, it is not a good fit for the current goals as each plugin would be re-implementing a lot of boilerplate.
 See the deletion-plugins.md file for this alternative proposal in more detail.
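
[Editor's note] The delete-item-action.md hunk above refers to a rejected alternative in which every `DeleteItemActions` plugin would download and unpack the backup tarball itself. As a rough, hypothetical sketch of the per-plugin boilerplate that would have required (none of these names come from the Velero plugin API; only the Go standard library is used):

    package deleteitemsketch

    import (
        "archive/tar"
        "compress/gzip"
        "fmt"
        "io"
    )

    // walkBackupTarball un-gzips and untars a downloaded backup archive and calls
    // visit for every regular file entry, which is where a plugin would have to
    // look for the items it is responsible for deleting. Hypothetical helper,
    // shown for illustration only.
    func walkBackupTarball(backup io.Reader, visit func(path string, contents io.Reader) error) error {
        gz, err := gzip.NewReader(backup)
        if err != nil {
            return fmt.Errorf("opening gzip stream: %w", err)
        }
        defer gz.Close()

        tr := tar.NewReader(gz)
        for {
            hdr, err := tr.Next()
            if err == io.EOF {
                return nil // end of archive
            }
            if err != nil {
                return fmt.Errorf("reading tarball: %w", err)
            }
            if hdr.Typeflag != tar.TypeReg {
                continue // skip directories and other non-file entries
            }
            if err := visit(hdr.Name, tr); err != nil {
                return err
            }
        }
    }

The implemented design keeps this download/untar step inside Velero itself, which is why the design doc calls the alternative "a lot of boilerplate" for plugin authors.
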
diff --git a/internal/resourcemodifiers/resource_modifiers.go b/internal/resourcemodifiers/resource_modifiers.go
index ef81945223..dbcd8e7ba4 100644
--- a/internal/resourcemodifiers/resource_modifiers.go
+++ b/internal/resourcemodifiers/resource_modifiers.go
@@ -52,7 +52,7 @@ func GetResourceModifiersFromConfig(cm *v1.ConfigMap) (*ResourceModifiers, error
         return nil, fmt.Errorf("could not parse config from nil configmap")
     }
     if len(cm.Data) != 1 {
-        return nil, fmt.Errorf("illegal resource modifiers %s/%s configmap", cm.Name, cm.Namespace)
+        return nil, fmt.Errorf("illegal resource modifiers %s/%s configmap", cm.Namespace, cm.Name)
     }
 
     var yamlData string
diff --git a/internal/resourcepolicies/resource_policies.go b/internal/resourcepolicies/resource_policies.go
index 5da2da8fc6..956a06753c 100644
--- a/internal/resourcepolicies/resource_policies.go
+++ b/internal/resourcepolicies/resource_policies.go
@@ -132,7 +132,7 @@ func GetResourcePoliciesFromConfig(cm *v1.ConfigMap) (*Policies, error) {
         return nil, fmt.Errorf("could not parse config from nil configmap")
     }
     if len(cm.Data) != 1 {
-        return nil, fmt.Errorf("illegal resource policies %s/%s configmap", cm.Name, cm.Namespace)
+        return nil, fmt.Errorf("illegal resource policies %s/%s configmap", cm.Namespace, cm.Name)
     }
 
     var yamlData string
diff --git a/internal/util/managercontroller/managercontroller.go b/internal/util/managercontroller/managercontroller.go
deleted file mode 100644
index bb80826060..0000000000
--- a/internal/util/managercontroller/managercontroller.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-Copyright 2020 the Velero contributors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// TODO(2.0) After converting all controllers to runtime-controller,
-// the functions in this file will no longer be needed and should be removed.
-package managercontroller
-
-import (
-    "context"
-
-    "sigs.k8s.io/controller-runtime/pkg/manager"
-
-    "github.com/vmware-tanzu/velero/pkg/controller"
-)
-
-// Runnable will turn a "regular" runnable component (such as a controller)
-// into a controller-runtime Runnable
-func Runnable(p controller.Interface, numWorkers int) manager.Runnable {
-    // Pass the provided Context down to the run function.
-    f := func(ctx context.Context) error {
-        return p.Run(ctx, numWorkers)
-    }
-    return manager.RunnableFunc(f)
-}
diff --git a/pkg/cmd/cli/backuplocation/delete.go b/pkg/cmd/cli/backuplocation/delete.go
index d34bfa8e5a..922c32df99 100644
--- a/pkg/cmd/cli/backuplocation/delete.go
+++ b/pkg/cmd/cli/backuplocation/delete.go
@@ -33,8 +33,6 @@ import (
     "github.com/vmware-tanzu/velero/pkg/cmd/cli"
 )
 
-const bslLabelKey = "velero.io/storage-location"
-
 // NewDeleteCommand creates and returns a new cobra command for deleting backup-locations.
 func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
     o := cli.NewDeleteOptions("backup-location")
@@ -146,7 +144,7 @@ func findAssociatedBackups(client kbclient.Client, bslName, ns string) (velerov1
     var backups velerov1api.BackupList
     err := client.List(context.Background(), &backups, &kbclient.ListOptions{
         Namespace: ns,
-        Raw:       &metav1.ListOptions{LabelSelector: bslLabelKey + "=" + bslName},
+        Raw:       &metav1.ListOptions{LabelSelector: velerov1api.StorageLocationLabel + "=" + bslName},
     })
     return backups, err
 }
@@ -155,7 +153,7 @@ func findAssociatedBackupRepos(client kbclient.Client, bslName, ns string) (vele
     var repos velerov1api.BackupRepositoryList
     err := client.List(context.Background(), &repos, &kbclient.ListOptions{
         Namespace: ns,
-        Raw:       &metav1.ListOptions{LabelSelector: bslLabelKey + "=" + bslName},
+        Raw:       &metav1.ListOptions{LabelSelector: velerov1api.StorageLocationLabel + "=" + bslName},
     })
     return repos, err
 }
diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go
index b0003f9a8d..d194ecd045 100644
--- a/pkg/controller/backup_controller.go
+++ b/pkg/controller/backup_controller.go
@@ -533,8 +533,8 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B
     if len(errors) > 0 {
         return nil, errors
     }
-    allLocations := &velerov1api.VolumeSnapshotLocationList{}
-    err := b.kbClient.List(context.Background(), allLocations, &kbclient.ListOptions{Namespace: backup.Namespace, LabelSelector: labels.Everything()})
+    locations := &velerov1api.VolumeSnapshotLocationList{}
+    err := b.kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: backup.Namespace, LabelSelector: labels.Everything()})
     if err != nil {
         errors = append(errors, fmt.Sprintf("error listing volume snapshot locations: %v", err))
         return nil, errors
@@ -542,8 +542,8 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B
 
     // build a map of provider->list of all locations for the provider
     allProviderLocations := make(map[string][]*velerov1api.VolumeSnapshotLocation)
-    for i := range allLocations.Items {
-        loc := allLocations.Items[i]
+    for i := range locations.Items {
+        loc := locations.Items[i]
         allProviderLocations[loc.Spec.Provider] = append(allProviderLocations[loc.Spec.Provider], &loc)
     }
 
diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go
index 5c9168a294..af439b834f 100644
--- a/pkg/controller/backup_deletion_controller.go
+++ b/pkg/controller/backup_deletion_controller.go
@@ -570,7 +570,7 @@ func (r *backupDeletionReconciler) patchDeleteBackupRequest(ctx context.Context,
 }
 
 func (r *backupDeletionReconciler) patchBackup(ctx context.Context, backup *velerov1api.Backup, mutate func(*velerov1api.Backup)) (*velerov1api.Backup, error) {
-    //TODO: The patchHelper can't be used here because the `backup/xxx/status` does not exist, until the bakcup resource is refactored
+    //TODO: The patchHelper can't be used here because the `backup/xxx/status` does not exist, until the backup resource is refactored
 
     // Record original json
     oldData, err := json.Marshal(backup)
diff --git a/site/content/docs/main/csi-snapshot-data-movement.md b/site/content/docs/main/csi-snapshot-data-movement.md
index b694da9f3a..ebcaeba297 100644
--- a/site/content/docs/main/csi-snapshot-data-movement.md
+++ b/site/content/docs/main/csi-snapshot-data-movement.md
@@ -310,7 +310,7 @@ Velero backs up resources for CSI snapshot data movement backup in the same way
 - CSI plugin checks if a data movement is required, if so it creates a `DataUpload` CR and then returns to Velero backup.
 - Velero now is able to back up other resources, including other PVC objects.
 - Velero backup controller periodically queries the data movement status from CSI plugin, the period is configurable through the Velero server parameter `--item-operation-sync-frequency`, by default it is 10s. On the call, CSI plugin turns to check the phase of the `DataUpload` CRs.
-- When all the `DataUpload` CRs come to a terminal state (i.e., `Completed`, `Failed` or `Cancelled`), Velero backup perists all the necessary information and finish the backup.
+- When all the `DataUpload` CRs come to a terminal state (i.e., `Completed`, `Failed` or `Cancelled`), Velero backup persists all the necessary information and finish the backup.
 - CSI plugin expects a data mover to handle the `DataUpload` CR. If no data mover is configured for the backup, Velero built-in data mover will handle it.
 - If the `DataUpload` CR does not reach to the terminal state with in the given time, the `DataUpload` CR will be cancelled. You can set the timeout value per backup through the `--item-operation-timeout` parameter, the default value is `4 hours`.
 
diff --git a/site/content/docs/v0.5.0/faq.md b/site/content/docs/v0.5.0/faq.md
index c96c6d5495..b339987098 100644
--- a/site/content/docs/v0.5.0/faq.md
+++ b/site/content/docs/v0.5.0/faq.md
@@ -6,7 +6,7 @@ layout: docs
 
 ## When is it appropriate to use Ark instead of etcd's built in backup/restore?
 
 Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
-example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
+example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
 sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
 generally a better approach. It gives you the ability to throw away an unstable cluster and restore
 your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up
diff --git a/site/content/docs/v0.6.0/faq.md b/site/content/docs/v0.6.0/faq.md
index 0fbfbbb354..07c9c9458b 100644
--- a/site/content/docs/v0.6.0/faq.md
+++ b/site/content/docs/v0.6.0/faq.md
@@ -6,7 +6,7 @@ layout: docs
 
 ## When is it appropriate to use Ark instead of etcd's built in backup/restore?
 
 Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
-example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
+example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
 sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
 generally a better approach. It gives you the ability to throw away an unstable cluster and restore
 your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up
diff --git a/site/content/docs/v0.7.0/faq.md b/site/content/docs/v0.7.0/faq.md
index 0fbfbbb354..07c9c9458b 100644
--- a/site/content/docs/v0.7.0/faq.md
+++ b/site/content/docs/v0.7.0/faq.md
@@ -6,7 +6,7 @@ layout: docs
 
 ## When is it appropriate to use Ark instead of etcd's built in backup/restore?
 
 Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
-example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
+example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
 sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
 generally a better approach. It gives you the ability to throw away an unstable cluster and restore
 your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up
diff --git a/site/content/docs/v0.7.1/faq.md b/site/content/docs/v0.7.1/faq.md
index 0fbfbbb354..07c9c9458b 100644
--- a/site/content/docs/v0.7.1/faq.md
+++ b/site/content/docs/v0.7.1/faq.md
@@ -6,7 +6,7 @@ layout: docs
 
 ## When is it appropriate to use Ark instead of etcd's built in backup/restore?
 
 Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
-example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
+example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
 sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
 generally a better approach. It gives you the ability to throw away an unstable cluster and restore
 your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up
diff --git a/site/content/docs/v0.8.0/faq.md b/site/content/docs/v0.8.0/faq.md
index 0fbfbbb354..07c9c9458b 100644
--- a/site/content/docs/v0.8.0/faq.md
+++ b/site/content/docs/v0.8.0/faq.md
@@ -6,7 +6,7 @@ layout: docs
 
 ## When is it appropriate to use Ark instead of etcd's built in backup/restore?
 
 Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
-example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
+example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
 sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
 generally a better approach. It gives you the ability to throw away an unstable cluster and restore
 your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up
diff --git a/site/content/docs/v0.8.1/faq.md b/site/content/docs/v0.8.1/faq.md
index 0fbfbbb354..07c9c9458b 100644
--- a/site/content/docs/v0.8.1/faq.md
+++ b/site/content/docs/v0.8.1/faq.md
@@ -6,7 +6,7 @@ layout: docs
 
 ## When is it appropriate to use Ark instead of etcd's built in backup/restore?
 
 Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
-example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
+example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
 sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
 generally a better approach. It gives you the ability to throw away an unstable cluster and restore
 your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up
diff --git a/site/content/docs/v1.12/csi-snapshot-data-movement.md b/site/content/docs/v1.12/csi-snapshot-data-movement.md
index 667be0a48c..75fad38fa9 100644
--- a/site/content/docs/v1.12/csi-snapshot-data-movement.md
+++ b/site/content/docs/v1.12/csi-snapshot-data-movement.md
@@ -324,7 +324,7 @@ Velero backs up resources for CSI snapshot data movement backup in the same way
 - CSI plugin checks if a data movement is required, if so it creates a `DataUpload` CR and then returns to Velero backup.
 - Velero now is able to back up other resources, including other PVC objects.
 - Velero backup controller periodically queries the data movement status from CSI plugin, the period is configurable through the Velero server parameter `--item-operation-sync-frequency`, by default it is 10s. On the call, CSI plugin turns to check the phase of the `DataUpload` CRs.
-- When all the `DataUpload` CRs come to a terminal state (i.e., `Completed`, `Failed` or `Cancelled`), Velero backup perists all the necessary information and finish the backup.
+- When all the `DataUpload` CRs come to a terminal state (i.e., `Completed`, `Failed` or `Cancelled`), Velero backup persists all the necessary information and finish the backup.
 - CSI plugin expects a data mover to handle the `DataUpload` CR. If no data mover is configured for the backup, Velero built-in data mover will handle it.
 - If the `DataUpload` CR does not reach to the terminal state with in the given time, the `DataUpload` CR will be cancelled. You can set the timeout value per backup through the `--item-operation-timeout` parameter, the default value is `4 hours`.
 
diff --git a/site/content/docs/v1.5/restic.md b/site/content/docs/v1.5/restic.md
index 375994e8e5..8265e00030 100644
--- a/site/content/docs/v1.5/restic.md
+++ b/site/content/docs/v1.5/restic.md
@@ -10,7 +10,7 @@ the supported cloud providers’ block storage offerings (Amazon EBS Volumes, Az
 It also provides a plugin model that enables anyone to implement additional object and block storage backends, outside the main Velero repository.
 
-The restic intergation was added to give you an out-of-the-box solution for backing up and restoring almost any type of Kubernetes volume. This integration is an addition to Velero's capabilities, not a replacement for existing functionality. If you're running on AWS, and taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using restic. However, if you need a volume snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir,
+The restic integration was added to give you an out-of-the-box solution for backing up and restoring almost any type of Kubernetes volume. This integration is an addition to Velero's capabilities, not a replacement for existing functionality. If you're running on AWS, and taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using restic. However, if you need a volume snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir,
 local, or any other volume type that doesn't have a native snapshot concept, restic might be for you.
 
 Restic is not tied to a specific storage platform, which means that this integration also paves the way for future work to enable
diff --git a/test/e2e/backups/ttl.go b/test/e2e/backups/ttl.go
index 98bc3d9a46..52069863cd 100644
--- a/test/e2e/backups/ttl.go
+++ b/test/e2e/backups/ttl.go
@@ -171,7 +171,7 @@ func TTLTest() {
             Expect(t).To(Equal(test.ttl))
         })
 
-        By(fmt.Sprintf("Waiting %s minutes for removing backup ralated resources by GC", test.ttl.String()), func() {
+        By(fmt.Sprintf("Waiting %s minutes for removing backup related resources by GC", test.ttl.String()), func() {
             time.Sleep(test.ttl)
         })
 
diff --git a/test/e2e/bsl-mgmt/deletion.go b/test/e2e/bsl-mgmt/deletion.go
index f0d7b414a5..90b3abf570 100644
--- a/test/e2e/bsl-mgmt/deletion.go
+++ b/test/e2e/bsl-mgmt/deletion.go
@@ -159,7 +159,7 @@ func BslDeletionTest(useVolumeSnapshots bool) {
             Expect(AddLabelToPod(context.Background(), "kibishii-deployment-1", bslDeletionTestNs, label_2)).To(Succeed())
         })
 
-        By("Get all 2 PVCs of Kibishii and label them seprately ", func() {
+        By("Get all 2 PVCs of Kibishii and label them separately ", func() {
             pvc, err := GetPvcByPVCName(context.Background(), bslDeletionTestNs, podName_1)
             Expect(err).To(Succeed())
             fmt.Println(pvc)
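
[Editor's note] A closing remark on the pkg/cmd/cli/backuplocation/delete.go hunks: dropping the file-local bslLabelKey constant in favor of velerov1api.StorageLocationLabel means the CLI filters on the same label key the server writes, so the two cannot drift apart. A minimal standalone sketch of that listing pattern, assuming only the import paths and calls already visible in the hunks above (the helper name is made up for illustration):

    package bslsketch

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        kbclient "sigs.k8s.io/controller-runtime/pkg/client"

        velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
    )

    // backupsForLocation lists the backups labeled with the given backup storage
    // location name, mirroring findAssociatedBackups from the diff but using the
    // shared StorageLocationLabel constant instead of a duplicated literal.
    func backupsForLocation(c kbclient.Client, ns, bslName string) (velerov1api.BackupList, error) {
        var backups velerov1api.BackupList
        err := c.List(context.Background(), &backups, &kbclient.ListOptions{
            Namespace: ns,
            Raw:       &metav1.ListOptions{LabelSelector: velerov1api.StorageLocationLabel + "=" + bslName},
        })
        return backups, err
    }

The same selector pattern applies to findAssociatedBackupRepos, which differs only in listing velerov1api.BackupRepositoryList instead of BackupList.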