diff --git a/cmd/cluster/install2.go b/cmd/cluster/install2.go index 054a698..cbdcfc9 100644 --- a/cmd/cluster/install2.go +++ b/cmd/cluster/install2.go @@ -46,7 +46,6 @@ func installCmd2() *cobra.Command { return err }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - fmt.Println("install validate func args", args) switch len(args) { case 2: return nil, cobra.ShellCompDirectiveDefault @@ -155,3 +154,19 @@ func GetEnv(envVar string) (bool, string) { return true, value } } + +func ReadClusterOptionsByName(cmd *cobra.Command) (utils.ClusterOptions, error) { + var ops utils.ClusterOptions + var err error + if name, _ := cmd.Flags().GetString("name"); name == "" { + return ops, fmt.Errorf("the cluster name is required") + } else if !utils.CheckClusterNameExist(name) { + return ops, fmt.Errorf("the cluster name is not existed, please install the cluster first") + } else { + ops, err = utils.LoadClusterOptionsFromFile(filepath.Join(utils.ClusterInfoDir, name)) + if err != nil { + return ops, err + } + } + return ops, err +} diff --git a/cmd/cluster/root.go b/cmd/cluster/root.go index 1c1dc3b..fe71bcf 100644 --- a/cmd/cluster/root.go +++ b/cmd/cluster/root.go @@ -15,36 +15,43 @@ package cluster import ( - "fmt" - "path/filepath" + "os" + "strings" + "github.com/fatih/color" "github.com/openGemini/gemix/pkg/cluster/manager" operator "github.com/openGemini/gemix/pkg/cluster/operation" "github.com/openGemini/gemix/pkg/cluster/spec" - "github.com/openGemini/gemix/utils" + "github.com/openGemini/gemix/pkg/gui" + "github.com/openGemini/gemix/pkg/logger" + logprinter "github.com/openGemini/gemix/pkg/logger/printer" "github.com/spf13/cobra" "go.uber.org/zap" ) var ( - RootCmd *cobra.Command // represents the cluster command + ClusterCmd *cobra.Command // represents the cluster command gOpt operator.Options teleTopology string //lint:ignore U1000 keep this skipConfirm bool - log = zap.NewNop() // init default 
logger + log = logprinter.NewLogger("") // init default logger ) var openGeminiSpec *spec.SpecManager var cm *manager.Manager func init() { - RootCmd = &cobra.Command{ + logger.InitGlobalLogger() + + ClusterCmd = &cobra.Command{ Use: "cluster", Short: "Deploy an openGemini cluster for production", Long: `Deploy an openGemini cluster for production`, SilenceUsage: true, SilenceErrors: true, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + log.SetDisplayModeFromString(gOpt.DisplayMode) + if err := spec.Initialize("cluster"); err != nil { return err } @@ -54,7 +61,7 @@ func init() { }, } - RootCmd.AddCommand( + ClusterCmd.AddCommand( templateCmd(), installCmd(), installCmd2(), @@ -63,25 +70,42 @@ func init() { stopCmd(), stopCmd2, uninstallCmd, + newUninstallCmd(), statusCmd, upgradeCmd, ) - //RootCmd.PersistentFlags().BoolVarP(&skipConfirm, "yes", "y", false, "Skip all confirmations and assumes 'yes'") + //ClusterCmd.PersistentFlags().BoolVarP(&skipConfirm, "yes", "y", false, "Skip all confirmations and assumes 'yes'") } -func ReadClusterOptionsByName(cmd *cobra.Command) (utils.ClusterOptions, error) { - var ops utils.ClusterOptions - var err error - if name, _ := cmd.Flags().GetString("name"); name == "" { - return ops, fmt.Errorf("the cluster name is required") - } else if !utils.CheckClusterNameExist(name) { - return ops, fmt.Errorf("the cluster name is not existed, please install the cluster first") - } else { - ops, err = utils.LoadClusterOptionsFromFile(filepath.Join(utils.ClusterInfoDir, name)) - if err != nil { - return ops, err - } +// Execute executes the root command +func Execute() { + zap.L().Info("Execute command", zap.String("command", strings.Join(os.Args, " "))) + zap.L().Debug("Environment variables", zap.Strings("env", os.Environ())) + + code := 0 + err := ClusterCmd.Execute() + if err != nil { + code = 1 + } + + zap.L().Info("Execute command finished", zap.Int("code", code), zap.Error(err)) + + if err != nil { + 
gui.ColorErrorMsg.Fprintf(os.Stderr, "\nError: %s", err.Error()) + } + + logger.OutputDebugLog("gemix-cluster") + + err = logger.OutputAuditLogIfEnabled() + if err != nil { + zap.L().Warn("Write audit log file failed", zap.Error(err)) + code = 1 + } + + color.Unset() + + if code != 0 { + os.Exit(code) } - return ops, err } diff --git a/cmd/cluster/uninstall.go b/cmd/cluster/uninstall.go index 7453b5f..b2a5057 100644 --- a/cmd/cluster/uninstall.go +++ b/cmd/cluster/uninstall.go @@ -1,4 +1,4 @@ -// Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd. +// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -8,55 +8,65 @@ // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package cluster import ( - "fmt" - - "github.com/openGemini/gemix/pkg/cluster/manager" - "github.com/openGemini/gemix/utils" + operator "github.com/openGemini/gemix/pkg/cluster/operation" + "github.com/openGemini/gemix/pkg/cluster/spec" + "github.com/openGemini/gemix/pkg/set" + "github.com/pkg/errors" "github.com/spf13/cobra" ) -// uninstallCmd represents the uninstall command -var uninstallCmd = &cobra.Command{ - Use: "uninstall", - Short: "uninstall cluster", - Long: `uninstall an openGemini cluster based on configuration files.`, - Run: func(cmd *cobra.Command, args []string) { - var ops utils.ClusterOptions - var err error - if ops, err = ReadClusterOptionsByName(cmd); err != nil { - fmt.Println(err) - fmt.Println(cmd.UsageString()) - return - } - - err = UninstallCluster(ops) - if err != nil { - fmt.Println(err) - } - }, -} +func newUninstallCmd() *cobra.Command { + destroyOpt := operator.Options{} + cmd := &cobra.Command{ + Use: "uninstall ", + Short: "Uninstall a specified cluster", + Long: `Uninstall a specified cluster, which will clean the deployment binaries and data. +`, + //You can retain some nodes and roles data when destroy cluster, see Example: + // Example: ` + //$ gemix cluster uninstall --retain-role-data grafana + //$ gemix cluster uninstall --retain-node-data 172.16.13.11:3000 + //$ gemix cluster uninstall --retain-node-data 172.16.13.12 + // `, + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return cmd.Help() + } -func UninstallCluster(ops utils.ClusterOptions) error { - uninstaller := manager.NewGeminiUninstaller(ops) - defer uninstaller.Close() + clusterName := args[0] - if err := uninstaller.Prepare(); err != nil { - return err - } - if err := uninstaller.Run(); err != nil { - return err + // Validate the retained roles to prevent unexpected deleting data + if len(destroyOpt.RetainDataRoles) > 0 { + roles := set.NewStringSet(spec.AllComponentNames()...) 
+ for _, role := range destroyOpt.RetainDataRoles { + if !roles.Exist(role) { + return errors.Errorf("role name `%s` invalid", role) + } + } + } + + return cm.UninstallCluster(clusterName, gOpt, destroyOpt, skipConfirm) + }, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + switch len(args) { + case 0: + return shellCompGetClusterName(cm, toComplete) + default: + return nil, cobra.ShellCompDirectiveNoFileComp + } + }, } - fmt.Printf("Successfully uninstalled the openGemini cluster with version : %s\n", ops.Version) - return nil -} -func init() { - uninstallCmd.Flags().StringP("name", "n", "", "cluster name") + //cmd.Flags().StringArrayVar(&destroyOpt.RetainDataNodes, "retain-node-data", nil, "Specify the nodes or hosts whose data will be retained") + //cmd.Flags().StringArrayVar(&destroyOpt.RetainDataRoles, "retain-role-data", nil, "Specify the roles whose data will be retained") + cmd.Flags().BoolVar(&destroyOpt.Force, "force", false, "Force will ignore remote error while destroy the cluster") + + return cmd } diff --git a/cmd/cluster/uninstall2.go b/cmd/cluster/uninstall2.go new file mode 100644 index 0000000..d70167e --- /dev/null +++ b/cmd/cluster/uninstall2.go @@ -0,0 +1,62 @@ +// Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cluster + +import ( + "fmt" + + "github.com/openGemini/gemix/pkg/cluster/manager" + "github.com/openGemini/gemix/utils" + "github.com/spf13/cobra" +) + +// uninstallCmd represents the uninstall command +var uninstallCmd = &cobra.Command{ + Use: "uninstall2", + Short: "uninstall cluster", + Long: `uninstall an openGemini cluster based on configuration files.`, + Run: func(cmd *cobra.Command, args []string) { + var ops utils.ClusterOptions + var err error + if ops, err = ReadClusterOptionsByName(cmd); err != nil { + fmt.Println(err) + fmt.Println(cmd.UsageString()) + return + } + + err = UninstallCluster(ops) + if err != nil { + fmt.Println(err) + } + }, +} + +func UninstallCluster(ops utils.ClusterOptions) error { + uninstaller := manager.NewGeminiUninstaller(ops) + defer uninstaller.Close() + + if err := uninstaller.Prepare(); err != nil { + return err + } + if err := uninstaller.Run(); err != nil { + return err + } + fmt.Printf("Successfully uninstalled the openGemini cluster with version : %s\n", ops.Version) + return nil +} + +func init() { + uninstallCmd.Flags().StringP("name", "n", "", "cluster name") +} diff --git a/cmd/root.go b/cmd/root.go index 2ac071d..1a05815 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -15,7 +15,6 @@ package cmd import ( - "os" "time" "github.com/openGemini/gemix/cmd/cluster" @@ -34,11 +33,8 @@ var RootCmd = &cobra.Command{ } func Execute() { - RootCmd.AddCommand(cluster.RootCmd) - err := RootCmd.Execute() - if err != nil { - os.Exit(1) - } + RootCmd.AddCommand(cluster.ClusterCmd) + cluster.Execute() } var ( diff --git a/codecov.yml b/codecov.yml index 28c5407..d3e761b 100644 --- a/codecov.yml +++ b/codecov.yml @@ -8,7 +8,7 @@ coverage: status: project: default: - target: 70% + target: 10% if_ci_failed: success patch: default: diff --git a/embed/examples/cluster/local.tpl b/embed/examples/cluster/local.tpl index 4bda41b..a9f7b48 100644 --- a/embed/examples/cluster/local.tpl +++ b/embed/examples/cluster/local.tpl @@ -24,25 
+24,25 @@ global: {{- end }} {{ if .TSMetaServers -}} -ts-meta-servers: +ts_meta_servers: {{- range .TSMetaServers }} - host: {{ . }} {{- end }} {{ end }} {{ if .TSSqlServers -}} -ts-sql-servers: +ts_sql_servers: {{- range .TSSqlServers }} - host: {{ . }} {{- end }} {{ end }} {{ if .TSStoreServers -}} -ts-store-servers: +ts_store_servers: {{- range .TSStoreServers }} - host: {{ . }} {{- end }} {{ end }} {{ if .GrafanaServers -}} -grafana-servers: +grafana_servers: {{- range .GrafanaServers }} - host: {{ . }} {{- end }} diff --git a/embed/examples/cluster/minimal.yaml b/embed/examples/cluster/minimal.yaml index 766ca08..009d5b4 100644 --- a/embed/examples/cluster/minimal.yaml +++ b/embed/examples/cluster/minimal.yaml @@ -50,7 +50,7 @@ global: # ts-monitor: # Server configs are used to specify the configuration of ts-meta Servers. -ts-meta-servers: +ts_meta_servers: ### The ip address of the ts-meta Server. - host: 10.0.1.11 ### SSH port of the server. @@ -95,7 +95,7 @@ ts-meta-servers: # logging.level: warning ### Server configs are used to specify the configuration of ts-sql Servers. -ts-sql-servers: +ts_sql_servers: ### The ip address of the ts-sql Server. - host: 10.0.1.14 ### SSH port of the server. @@ -124,7 +124,7 @@ ts-sql-servers: # logging.level: warning ### Server configs are used to specify the configuration of ts-store Servers. -ts-store-servers: +ts_store_servers: ### The ip address of the ts-store Server. - host: 10.0.1.14 ### SSH port of the server. diff --git a/embed/examples/cluster/topology.example.yaml b/embed/examples/cluster/topology.example.yaml index 766ca08..5870aa7 100644 --- a/embed/examples/cluster/topology.example.yaml +++ b/embed/examples/cluster/topology.example.yaml @@ -14,7 +14,7 @@ global: ### openGemini Cluster data storage directory data_dir: "/gemini-data/meta" # operating system, linux/darwin. - os: "linux" + os: "linux" # Supported values: "amd64", "arm64" (default: "amd64"). 
arch: "amd64" # # Resource Control is used to limit the resource of an instance. @@ -31,26 +31,26 @@ global: # io_read_bandwidth_max: "/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0 100M" # io_write_bandwidth_max: "/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0 100M" -### Server configs are used to specify the runtime configuration of openGemini components. -### All configuration items can be found in openGemini docs: -### - ts-meta: https://docs.opengemini.org/ -### - ts-sql: https://docs.opengemini.org/ -### - ts-store: https://docs.opengemini.org/ -### - ts-monitor: https://docs.opengemini.org/ -### -### All configuration items use points to represent the hierarchy, e.g: -### common.ha-policy -### ^ -### - example: https://github.com/openGemini/openGemini-UP/blob/main/embed/examples/cluster/topology.example.yaml -### You can overwrite this configuration via the instance-level `config` field. -# server_configs: + ### Server configs are used to specify the runtime configuration of openGemini components. + ### All configuration items can be found in openGemini docs: + ### - ts-meta: https://docs.opengemini.org/ + ### - ts-sql: https://docs.opengemini.org/ + ### - ts-store: https://docs.opengemini.org/ + ### - ts-monitor: https://docs.opengemini.org/ + ### + ### All configuration items use points to represent the hierarchy, e.g: + ### common.ha-policy + ### ^ + ### - example: https://github.com/openGemini/openGemini-UP/blob/main/embed/examples/cluster/topology.example.yaml + ### You can overwrite this configuration via the instance-level `config` field. + # server_configs: # ts-meta: # ts-sql: # ts-store: # ts-monitor: # Server configs are used to specify the configuration of ts-meta Servers. -ts-meta-servers: +ts_meta_servers: ### The ip address of the ts-meta Server. - host: 10.0.1.11 ### SSH port of the server. @@ -95,7 +95,7 @@ ts-meta-servers: # logging.level: warning ### Server configs are used to specify the configuration of ts-sql Servers. 
-ts-sql-servers: +ts_sql_servers: ### The ip address of the ts-sql Server. - host: 10.0.1.14 ### SSH port of the server. @@ -124,7 +124,7 @@ ts-sql-servers: # logging.level: warning ### Server configs are used to specify the configuration of ts-store Servers. -ts-store-servers: +ts_store_servers: ### The ip address of the ts-store Server. - host: 10.0.1.14 ### SSH port of the server. diff --git a/go.mod b/go.mod index 58257d9..328c2e3 100644 --- a/go.mod +++ b/go.mod @@ -15,17 +15,21 @@ require ( github.com/google/uuid v1.3.0 github.com/joomcode/errorx v1.1.1 github.com/olekukonko/tablewriter v0.0.5 + github.com/otiai10/copy v1.14.0 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 + github.com/sethvargo/go-password v0.2.0 github.com/spf13/cobra v1.8.0 + github.com/stretchr/testify v1.8.2 go.uber.org/atomic v1.11.0 go.uber.org/zap v1.26.0 golang.org/x/crypto v0.15.0 golang.org/x/mod v0.14.0 - golang.org/x/sync v0.1.0 + golang.org/x/sync v0.3.0 golang.org/x/term v0.14.0 golang.org/x/text v0.14.0 gopkg.in/yaml.v2 v2.4.0 + software.sslmate.com/src/go-pkcs12 v0.4.0 ) require ( @@ -33,6 +37,7 @@ require ( github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/charmbracelet/harmonica v0.2.0 // indirect github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kr/fs v0.1.0 // indirect @@ -46,11 +51,13 @@ require ( github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/reflow v0.3.0 // indirect github.com/muesli/termenv v0.15.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.uber.org/goleak v1.2.1 // indirect - go.uber.org/multierr v1.10.0 // indirect + go.uber.org/multierr v1.11.0 
// indirect golang.org/x/sys v0.14.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 51cca41..9c28aa8 100644 --- a/go.sum +++ b/go.sum @@ -70,6 +70,9 @@ github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -85,25 +88,29 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI= +github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= -go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -123,8 +130,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -163,3 +170,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/pkg/base52/base52.go b/pkg/base52/base52.go new file mode 100644 index 0000000..16357c1 --- /dev/null +++ b/pkg/base52/base52.go @@ -0,0 +1,55 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package base52 + +import ( + "fmt" + "strings" +) + +const ( + space = "0123456789bcdfghjkmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ" + base = len(space) +) + +// Encode returns a string by encoding the id over a 51 characters space +func Encode(id int64) string { + var short []byte + for id > 0 { + i := id % int64(base) + short = append(short, space[i]) + id /= int64(base) + } + for i, j := 0, len(short)-1; i < j; i, j = i+1, j-1 { + short[i], short[j] = short[j], short[i] + } + return string(short) +} + +// Decode will decode the string and return the id +// The input string should be a valid one with only characters in the space +func Decode(encoded string) (int64, error) { + if len(encoded) != len([]rune(encoded)) { + return 0, fmt.Errorf("invalid encoded string: '%s'", encoded) + } + var id int64 + for i := 0; i < len(encoded); i++ { + idx := strings.IndexByte(space, encoded[i]) + if idx < 0 { + return 0, fmt.Errorf("invalid encoded string: '%s' contains invalid character", encoded) + } + id = id*int64(base) + int64(idx) + } + return id, nil +} diff --git a/pkg/cluster/audit/audit.go b/pkg/cluster/audit/audit.go new file mode 100644 index 0000000..a684f4d --- /dev/null +++ b/pkg/cluster/audit/audit.go @@ -0,0 +1,315 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package audit + +import ( + "bufio" + "encoding/json" + "fmt" + "net/url" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/fatih/color" + "github.com/openGemini/gemix/pkg/base52" + "github.com/openGemini/gemix/pkg/crypto/rand" + "github.com/openGemini/gemix/pkg/gui" + "github.com/openGemini/gemix/pkg/utils" + "github.com/pkg/errors" +) + +const ( + // EnvNameAuditID is the alternative ID appended to time based audit ID + EnvNameAuditID = "GEMIX_AUDIT_ID" +) + +// CommandArgs returns the original commands from the first line of a file +func CommandArgs(fp string) ([]string, error) { + file, err := os.Open(fp) + if err != nil { + return nil, errors.WithStack(err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if !scanner.Scan() { + return nil, errors.New("unknown audit log format") + } + + args := strings.Split(scanner.Text(), " ") + return decodeCommandArgs(args) +} + +// encodeCommandArgs encode args with url.QueryEscape +func encodeCommandArgs(args []string) []string { + encoded := []string{} + + for _, arg := range args { + encoded = append(encoded, url.QueryEscape(arg)) + } + + return encoded +} + +// decodeCommandArgs decode args with url.QueryUnescape +func decodeCommandArgs(args []string) ([]string, error) { + decoded := []string{} + + for _, arg := range args { + a, err := url.QueryUnescape(arg) + if err != nil { + return nil, errors.WithMessage(err, "failed on decode the command line of audit log") + } + decoded = append(decoded, a) + } + + return decoded, nil +} + +// ShowAuditList show the audit list. 
+func ShowAuditList(dir string) error { + // Header + clusterTable := [][]string{{"ID", "Time", "Command"}} + + auditList, err := GetAuditList(dir) + if err != nil { + return err + } + + for _, item := range auditList { + clusterTable = append(clusterTable, []string{ + item.ID, + item.Time, + item.Command, + }) + } + + gui.PrintTable(clusterTable, true) + return nil +} + +// Item represents a single audit item +type Item struct { + ID string `json:"id"` + Time string `json:"time"` + Command string `json:"command"` +} + +// GetAuditList get the audit item list +func GetAuditList(dir string) ([]Item, error) { + fileInfos, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + + auditList := []Item{} + for _, fi := range fileInfos { + if fi.IsDir() { + continue + } + t, err := decodeAuditID(fi.Name()) + if err != nil { + continue + } + args, err := CommandArgs(filepath.Join(dir, fi.Name())) + if err != nil { + continue + } + cmd := strings.Join(args, " ") + auditList = append(auditList, Item{ + ID: fi.Name(), + Time: t.Format(time.RFC3339), + Command: cmd, + }) + } + + sort.Slice(auditList, func(i, j int) bool { + return auditList[i].Time < auditList[j].Time + }) + + return auditList, nil +} + +// OutputAuditLog outputs audit log. 
+func OutputAuditLog(dir, fileSuffix string, data []byte) error { + auditID := base52.Encode(time.Now().UnixNano() + rand.Int63n(1000)) + if customID := os.Getenv(EnvNameAuditID); customID != "" { + auditID = fmt.Sprintf("%s_%s", auditID, customID) + } + if fileSuffix != "" { + auditID = fmt.Sprintf("%s_%s", auditID, fileSuffix) + } + + fname := filepath.Join(dir, auditID) + f, err := os.Create(fname) + if err != nil { + return errors.WithMessage(err, "create audit log") + } + defer f.Close() + + args := encodeCommandArgs(os.Args) + if _, err := f.Write([]byte(strings.Join(args, " ") + "\n")); err != nil { + return errors.WithMessage(err, "write audit log") + } + if _, err := f.Write(data); err != nil { + return errors.WithMessage(err, "write audit log") + } + return nil +} + +// ShowAuditLog show the audit with the specified auditID +func ShowAuditLog(dir string, auditID string) error { + path := filepath.Join(dir, auditID) + if utils.IsNotExist(path) { + return errors.Errorf("cannot find the audit log '%s'", auditID) + } + + t, err := decodeAuditID(auditID) + if err != nil { + return errors.WithMessagef(err, "unrecognized audit id '%s'", auditID) + } + + content, err := os.ReadFile(path) + if err != nil { + return errors.WithStack(err) + } + + hint := fmt.Sprintf("- OPERATION TIME: %s -", t.Format("2006-01-02T15:04:05")) + line := strings.Repeat("-", len(hint)) + _, _ = os.Stdout.WriteString(color.MagentaString("%s\n%s\n%s\n", line, hint, line)) + _, _ = os.Stdout.Write(content) + return nil +} + +// decodeAuditID decodes the auditID to unix timestamp +func decodeAuditID(auditID string) (time.Time, error) { + tsID := auditID + if strings.Contains(auditID, "_") { + tsID = strings.Split(auditID, "_")[0] + } + ts, err := base52.Decode(tsID) + if err != nil { + return time.Time{}, err + } + // compatible with old second based ts + if ts>>32 > 0 { + ts /= 1e9 + } + t := time.Unix(ts, 0) + return t, nil +} + +type deleteAuditLog struct { + Files []string `json:"files"` 
+ Size int64 `json:"size"` + Count int `json:"count"` + DelBeforeTime time.Time `json:"delete_before_time"` // audit logs before `DelBeforeTime` will be deleted +} + +// DeleteAuditLog cleanup audit log +func DeleteAuditLog(dir string, retainDays int, skipConfirm bool, displayMode string) error { + if retainDays < 0 { + return errors.Errorf("retainDays cannot be less than 0") + } + + deleteLog := &deleteAuditLog{ + Files: []string{}, + Size: 0, + Count: 0, + } + + // audit logs before `DelBeforeTime` will be deleted + oneDayDuration, _ := time.ParseDuration("-24h") + deleteLog.DelBeforeTime = time.Now().Add(oneDayDuration * time.Duration(retainDays)) + + fileInfos, err := os.ReadDir(dir) + if err != nil { + return err + } + + for _, f := range fileInfos { + if f.IsDir() { + continue + } + t, err := decodeAuditID(f.Name()) + if err != nil { + continue + } + if t.Before(deleteLog.DelBeforeTime) { + info, err := f.Info() + if err != nil { + continue + } + deleteLog.Size += info.Size() + deleteLog.Count++ + deleteLog.Files = append(deleteLog.Files, filepath.Join(dir, f.Name())) + } + } + + // output format json + if displayMode == "json" { + data, err := json.Marshal(struct { + *deleteAuditLog `json:"deleted_logs"` + }{deleteLog}) + + if err != nil { + return err + } + fmt.Println(string(data)) + } else { + // print table + fmt.Printf("Audit logs before %s will be deleted!\nFiles to be %s are:\n %s\nTotal count: %d \nTotal size: %s\n", + color.HiYellowString(deleteLog.DelBeforeTime.Format("2006-01-02T15:04:05")), + color.HiYellowString("deleted"), + strings.Join(deleteLog.Files, "\n "), + deleteLog.Count, + readableSize(deleteLog.Size), + ) + + if !skipConfirm { + if err := gui.PromptForConfirmOrAbortError("Do you want to continue? 
[y/N]:"); err != nil { + return err + } + } + } + + for _, f := range deleteLog.Files { + if err := os.Remove(f); err != nil { + return err + } + } + + if displayMode != "json" { + fmt.Println("clean audit log successfully") + } + + return nil +} + +func readableSize(b int64) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := int64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.2f %cB", + float64(b)/float64(div), "kMGTPE"[exp]) +} diff --git a/pkg/cluster/ctxt/context.go b/pkg/cluster/ctxt/context.go index 1de933d..e88a028 100644 --- a/pkg/cluster/ctxt/context.go +++ b/pkg/cluster/ctxt/context.go @@ -21,7 +21,6 @@ import ( "time" logprinter "github.com/openGemini/gemix/pkg/logger/printer" - "go.uber.org/zap" ) type contextKey string @@ -36,7 +35,7 @@ const ( ) type ( - // Executor is the executor interface for TiUP, all tasks will in the end + // Executor is the executor interface for gemix, all tasks will in the end // be passed to a executor and then be actually performed. Executor interface { // Execute run the command, then return it's stdout and stderr @@ -81,7 +80,7 @@ type ( ) // New create a context instance. 
-func New(ctx context.Context, limit int, logger *zap.Logger) context.Context { +func New(ctx context.Context, limit int, logger *logprinter.Logger) context.Context { concurrency := runtime.NumCPU() if limit > 0 { concurrency = limit diff --git a/pkg/cluster/manager/builder.go b/pkg/cluster/manager/builder.go index 47cde01..2a19314 100644 --- a/pkg/cluster/manager/builder.go +++ b/pkg/cluster/manager/builder.go @@ -20,12 +20,12 @@ import ( operator "github.com/openGemini/gemix/pkg/cluster/operation" "github.com/openGemini/gemix/pkg/cluster/spec" "github.com/openGemini/gemix/pkg/cluster/task" + logprinter "github.com/openGemini/gemix/pkg/logger/printer" "github.com/openGemini/gemix/pkg/meta" - "go.uber.org/zap" ) // buildDownloadCompTasks build download component tasks -func buildDownloadCompTasks(clusterVersion string, topo spec.Topology) []*task.StepDisplay { +func buildDownloadCompTasks(clusterVersion string, topo spec.Topology, logger *logprinter.Logger) []*task.StepDisplay { var tasks []*task.StepDisplay uniqueTasks := make(map[string]struct{}) @@ -34,7 +34,7 @@ func buildDownloadCompTasks(clusterVersion string, topo spec.Topology) []*task.S if _, found := uniqueTasks[key]; !found { uniqueTasks[key] = struct{}{} - t := task.NewBuilder(zap.L()). + t := task.NewBuilder(logger). Download(inst.ComponentSource(), inst.OS(), inst.Arch(), clusterVersion). 
BuildAsStep(fmt.Sprintf(" - Download %s:%s (%s/%s)", inst.ComponentSource(), clusterVersion, inst.OS(), inst.Arch())) diff --git a/pkg/cluster/manager/install.go b/pkg/cluster/manager/install.go index d0aab55..027da43 100644 --- a/pkg/cluster/manager/install.go +++ b/pkg/cluster/manager/install.go @@ -15,347 +15,284 @@ package manager import ( - "errors" + "context" "fmt" + "os" "path/filepath" - "sync" - - "github.com/openGemini/gemix/pkg/cluster/config" - "github.com/openGemini/gemix/pkg/cluster/operation" - "github.com/openGemini/gemix/utils" - "github.com/pkg/sftp" - "golang.org/x/crypto/ssh" + "regexp" + "runtime" + "strings" + + "github.com/fatih/color" + "github.com/joomcode/errorx" + "github.com/openGemini/gemix/pkg/cluster/ctxt" + operator "github.com/openGemini/gemix/pkg/cluster/operation" + "github.com/openGemini/gemix/pkg/cluster/spec" + "github.com/openGemini/gemix/pkg/cluster/task" + "github.com/openGemini/gemix/pkg/gui" + "github.com/pkg/errors" ) -type UploadAction struct { - uploadInfo []*config.UploadInfo - remoteHost *config.RemoteHost -} +var ( + clusterNameRegexp = regexp.MustCompile(`^[a-zA-Z0-9\-_\.]+$`) +) -type Installer interface { - PrepareForInstall() error - Install() error - Close() +// InstallOptions contains the options for install. 
+type InstallOptions struct { + User string // username of login to the SSH server + SkipCreateUser bool // don't create the user + IdentityFile string // path to the private key file + UsePassword bool // use password instead of identity file for ssh connection } -type GeminiInstaller struct { - version string - // ip -> remotes - remotes map[string]*config.RemoteHost - uploads map[string]*UploadAction - - // ip -> ssh clients - sshClients map[string]*ssh.Client - sftpClients map[string]*sftp.Client - - configurator config.Configurator // conf reader - executor operation.Executor // execute commands on remote host - - clusterOptions utils.ClusterOptions - - wg sync.WaitGroup +// TODO +// DeployerInstance is an instance can deploy to a target deploy directory. +type DeployerInstance interface { + Deploy(b *task.Builder, srcPath string, deployDir string, version string, name string, clusterVersion string) } -func NewGeminiInstaller(ops utils.ClusterOptions) Installer { - return &GeminiInstaller{ - remotes: make(map[string]*config.RemoteHost), - uploads: make(map[string]*UploadAction), - sshClients: make(map[string]*ssh.Client), - sftpClients: make(map[string]*sftp.Client), - version: ops.Version, - configurator: config.NewGeminiConfigurator(ops.YamlPath, filepath.Join(utils.DownloadDst, ops.Version, utils.LocalEtcRelPath, utils.LocalConfName), filepath.Join(utils.DownloadDst, ops.Version, utils.LocalEtcRelPath)), - clusterOptions: ops, +// ValidateClusterNameOrError validates a cluster name and returns error if the name is invalid. +func ValidateClusterNameOrError(n string) error { + if len(n) == 0 { + return fmt.Errorf("cluster name must not be empty") + } + if !clusterNameRegexp.MatchString(n) { + return fmt.Errorf("cluster name '%s' is invalid. 
The cluster name should only contain alphabets, numbers, hyphen (-), underscore (_), and dot (.)", n) } + return nil } -func (d *GeminiInstaller) PrepareForInstall() error { - var err error - if err = d.configurator.BuildConfig(); err != nil { - return err +// Install a cluster. +func (m *Manager) Install( + clusterName string, + clusterVersion string, + topoFile string, + opt InstallOptions, + skipConfirm bool, + gOpt operator.Options, +) error { + if err := ValidateClusterNameOrError(clusterName); err != nil { + return errors.WithStack(err) } - conf := d.configurator.GetConfig() - dOps := operation.DownloadOptions{ - Version: d.version, - Os: conf.CommonConfig.Os, - Arch: conf.CommonConfig.Arch, - } - downloader := operation.NewGeminiDownloader(dOps) - if err = downloader.Run(); err != nil { - return err + exist, err := m.specManager.Exist(clusterName) + if err != nil { + return errors.WithStack(err) } - if err = d.configurator.GenClusterConfs(); err != nil { - return err + if exist { + return errors.Errorf("cluster name '%s' is duplicated. Please specify another cluster name", clusterName) } - // check the internet with all the remote servers - if err = d.prepareRemotes(conf, true); err != nil { - fmt.Printf("Failed to establish SSH connections with all remote servers. 
The specific error is: %s\n", err) - return err + metadata := m.specManager.NewMetadata() + topo := metadata.GetTopology() + base := topo.BaseTopo() + + if err = spec.ParseTopologyYaml(topoFile, topo); err != nil { + return errors.WithStack(err) } - fmt.Println("Success to establish SSH connections with all remote servers.") - d.executor = operation.NewGeminiExecutor(d.sshClients) + spec.ExpandRelativeDir(topo) - if err = d.prepareForUpload(); err != nil { - return err + if err = checkConflict(m, clusterName, topo); err != nil { + return errors.WithStack(err) } - if err = d.prepareUploadActions(conf); err != nil { - return err + var ( + sshConnProps *gui.SSHConnectionProps + ) + if sshConnProps, err = gui.ReadIdentityFileOrPassword(opt.IdentityFile, opt.UsePassword); err != nil { + return errors.WithStack(err) } - return nil -} - -func (d *GeminiInstaller) prepareRemotes(c *config.Config, needSftp bool) error { - if c == nil { - return utils.ErrUnexpectedNil + // TODO: Detect CPU Arch Name + if err = m.fillHost(sshConnProps, topo, opt.User); err != nil { + return errors.WithStack(err) } - for ip, ssh := range c.SSHConfig { - d.remotes[ip] = &config.RemoteHost{ - Ip: ip, - SSHPort: ssh.Port, - UpDataPath: ssh.UpDataPath, - LogPath: ssh.LogPath, - User: d.clusterOptions.User, - Typ: d.clusterOptions.SshType, - Password: d.clusterOptions.Password, - KeyPath: d.clusterOptions.Key, + if !skipConfirm { + if err = m.confirmTopology(clusterName, clusterVersion, topo); err != nil { + return errors.WithStack(err) } } - if err := d.tryConnect(c, needSftp); err != nil { - return err + if err = os.MkdirAll(m.specManager.Path(clusterName), 0750); err != nil { + return errorx.InitializationFailed. + Wrap(err, "Failed to create cluster metadata directory '%s'", m.specManager.Path(clusterName)). 
+ WithProperty(gui.SuggestionFromString("Please check file system permissions and try again.")) } - return nil -} + var ( + envInitTasks []*task.StepDisplay // tasks which are used to initialize environment + downloadCompTasks []*task.StepDisplay // tasks which are used to download components + deployCompTasks []*task.StepDisplay // tasks which are used to copy components to remote host + ) -func (d *GeminiInstaller) tryConnect(c *config.Config, needSftp bool) error { - for ip, r := range d.remotes { - var err error - var sshClient *ssh.Client - switch r.Typ { - case utils.SSH_PW: - sshClient, err = utils.NewSSH_PW(r.User, r.Password, r.Ip, r.SSHPort) - case utils.SSH_KEY: - sshClient, err = utils.NewSSH_Key(r.User, r.KeyPath, r.Ip, r.SSHPort) + // Initialize environment - } - if err != nil { - return err - } - d.sshClients[ip] = sshClient + globalOptions := base.GlobalOptions + metadata.SetUser(globalOptions.User) + metadata.SetVersion(clusterVersion) - if needSftp { - sftpClient, err := utils.NewSftpClient(sshClient) - if err != nil { - return err - } - d.sftpClients[ip] = sftpClient + // generate CA and client cert for TLS enabled cluster + //_, err = m.genAndSaveCertificate(clusterName, globalOptions) + //if err != nil { + // return err + //} - pwd, _ := sftpClient.Getwd() - // Convert relative paths to absolute paths. - if len(r.UpDataPath) > 1 && r.UpDataPath[:1] == "~" { - r.UpDataPath = filepath.Join(pwd, r.UpDataPath[1:]) - } - } - } - if needSftp { - for _, host := range c.CommonConfig.MetaHosts { - pwd, _ := d.sftpClients[host].Getwd() - confPath := filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteMetaConfName) - hostToml, _ := config.ReadFromToml(confPath) - // Convert relative paths in openGemini.conf to absolute paths. 
- hostToml = config.ConvertToml(hostToml, pwd) - if err := config.GenNewToml(hostToml, confPath); err != nil { - return err - } - } - for _, host := range c.CommonConfig.SqlHosts { - pwd, _ := d.sftpClients[host].Getwd() - confPath := filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteSqlConfName) - hostToml, _ := config.ReadFromToml(confPath) - // Convert relative paths in openGemini.conf to absolute paths. - hostToml = config.ConvertToml(hostToml, pwd) - if err := config.GenNewToml(hostToml, confPath); err != nil { - return err - } - } - for _, host := range c.CommonConfig.StoreHosts { - pwd, _ := d.sftpClients[host].Getwd() - confPath := filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteStoreConfName) - hostToml, _ := config.ReadFromToml(confPath) - // Convert relative paths in openGemini.conf to absolute paths. - hostToml = config.ConvertToml(hostToml, pwd) - if err := config.GenNewToml(hostToml, confPath); err != nil { - return err + uniqueHosts := getAllUniqueHosts(topo) + + for host, info := range uniqueHosts { + var dirs []string + for _, dir := range []string{globalOptions.DeployDir, globalOptions.LogDir} { + if dir == "" { + continue } - } - } - return nil -} -func (d *GeminiInstaller) prepareForUpload() error { - if d.executor == nil { - return utils.ErrUnexpectedNil - } - for ip, r := range d.remotes { - binPath := filepath.Join(r.UpDataPath, d.version, utils.RemoteBinRelPath) - etcPath := filepath.Join(r.UpDataPath, d.version, utils.RemoteEtcRelPath) - command := fmt.Sprintf("mkdir -p %s; mkdir -p %s;", binPath, etcPath) - if _, err := d.executor.ExecCommand(ip, command); err != nil { - return err + dirs = append(dirs, spec.Abs(globalOptions.User, dir)) } - } - return nil -} - -func (d *GeminiInstaller) prepareUploadActions(c *config.Config) error { - // ts-meta(bin and config files) - for _, host := range c.CommonConfig.MetaHosts { - if d.uploads[host] == nil { - d.uploads[host] = 
&UploadAction{ - remoteHost: d.remotes[host], - } + // the default, relative path of data dir is under deploy dir + if strings.HasPrefix(globalOptions.DataDir, "/") { + dirs = append(dirs, globalOptions.DataDir) } - d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ - LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalBinRelPath, utils.TsMeta), - RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteBinRelPath), - FileName: utils.TsMeta, - }) - d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ - LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteMetaConfName), - RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteEtcRelPath), - FileName: utils.RemoteMetaConfName, - }) - } - // ts-sql(bin and config files) - for _, host := range c.CommonConfig.SqlHosts { - if d.uploads[host] == nil { - d.uploads[host] = &UploadAction{ - remoteHost: d.remotes[host], - } - } - d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ - LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalBinRelPath, utils.TsSql), - RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteBinRelPath), - FileName: utils.TsSql, - }) - d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ - LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteSqlConfName), - RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteEtcRelPath), - FileName: utils.RemoteSqlConfName, - }) + t := task.NewBuilder(m.logger). + RootSSH( + host, + info.ssh, + opt.User, + sshConnProps.Password, + sshConnProps.IdentityFile, + sshConnProps.IdentityFilePassphrase, + gOpt.SSHTimeout, + gOpt.OptTimeout, + ). 
+ UserAction(host, globalOptions.User, globalOptions.Group, opt.SkipCreateUser || globalOptions.User == opt.User). + EnvInit(host, globalOptions.User, globalOptions.Group). + Mkdir(globalOptions.User, host, dirs...). + BuildAsStep(fmt.Sprintf(" - Prepare %s:%d", host, info.ssh)) + envInitTasks = append(envInitTasks, t) } - // ts-store(bin and config files) - for _, host := range c.CommonConfig.StoreHosts { - if d.uploads[host] == nil { - d.uploads[host] = &UploadAction{ - remoteHost: d.remotes[host], - } + // Download missing component + downloadCompTasks = buildDownloadCompTasks(clusterVersion, topo, m.logger) + + var deployTasksByHosts = make(map[string]*task.Builder, len(uniqueHosts)) + // Deploy components to remote + topo.IterInstance(func(inst spec.Instance) { + deployDir := spec.Abs(globalOptions.User, inst.DeployDir()) + // data dir would be empty for components which don't need it + dataDirs := spec.Abs(globalOptions.User, inst.DataDir()) + // log dir will always be with values, but might not be used by the component + logDir := spec.Abs(globalOptions.User, inst.LogDir()) + // Deploy component + // prepare deployment server + deployDirs := []string{ + deployDir, logDir, + filepath.Join(deployDir, "bin"), + filepath.Join(deployDir, "conf"), + filepath.Join(deployDir, "scripts"), } - d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ - LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalBinRelPath, utils.TsStore), - RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteBinRelPath), - FileName: utils.TsStore, - }) - d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ - LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteStoreConfName), - RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteEtcRelPath), - FileName: utils.RemoteStoreConfName, - }) - } - // script - for host := range c.SSHConfig 
{ - if d.uploads[host] == nil { - d.uploads[host] = &UploadAction{ - remoteHost: d.remotes[host], - } + tk, ok := deployTasksByHosts[inst.GetHost()] + if ok { + tk = tk.CopyComponent( + inst.ComponentSource(), + inst.ComponentName(), + inst.OS(), + inst.Arch(), + clusterVersion, + "", // use default srcPath + inst.GetManageHost(), + deployDir, + ) + deployTasksByHosts[inst.GetManageHost()] = tk + return } - d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ - LocalPath: utils.InstallScriptPath, - RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteEtcRelPath), - FileName: utils.InstallScript, - }) - } - - return nil -} -func (d *GeminiInstaller) Install() error { - fmt.Println("Start to install openGemini...") - errChan := make(chan error, len(d.uploads)) - var wgp sync.WaitGroup - wgp.Add(2) - - go func() { - defer wgp.Done() - d.wg.Add(len(d.uploads)) - for ip, action := range d.uploads { - go func(ip string, action *UploadAction, errChan chan error) { - defer d.wg.Done() - for _, c := range action.uploadInfo { - // check whether need to upload the file - // only support Linux - cmd := fmt.Sprintf("if [ -f %s ]; then echo 'File exists'; else echo 'File not found'; fi", filepath.Join(c.RemotePath, c.FileName)) - output, err := d.executor.ExecCommand(ip, cmd) - if string(output) == "File exists\n" && err == nil { - fmt.Printf("%s exists on %s.\n", c.FileName, c.RemotePath) - } else { - if err := utils.UploadFile(action.remoteHost.Ip, c.LocalPath, c.RemotePath, d.sftpClients[action.remoteHost.Ip]); err != nil { - fmt.Printf("upload %s to %s error: %v\n", c.LocalPath, action.remoteHost.Ip, err) - errChan <- err - } - } - } - - }(ip, action, errChan) - } - d.wg.Wait() - close(errChan) - }() - - var has_err = false - go func() { - defer wgp.Done() - for { - err, ok := <-errChan - if !ok { - break - } - fmt.Println(err) - has_err = true + t := task.NewBuilder(m.logger). 
// TODO: only support root deploy user + RootSSH( + inst.GetManageHost(), + inst.GetSSHPort(), + globalOptions.User, + sshConnProps.Password, + sshConnProps.IdentityFile, + sshConnProps.IdentityFilePassphrase, + gOpt.SSHTimeout, + gOpt.OptTimeout, + ). + //t := task.NewSimpleUerSSH(m.logger, inst.GetManageHost(), inst.GetSSHPort(), globalOptions.User, 0, 0). + Mkdir(globalOptions.User, inst.GetManageHost(), deployDirs...). + Mkdir(globalOptions.User, inst.GetManageHost(), dataDirs) + + if deployerInstance, ok := inst.(DeployerInstance); ok { + deployerInstance.Deploy(t, "", deployDir, clusterVersion, clusterName, clusterVersion) + } else { + // copy dependency component if needed + t = t.CopyComponent( + inst.ComponentSource(), + inst.ComponentName(), + inst.OS(), + inst.Arch(), + clusterVersion, + "", // use default srcPath + inst.GetManageHost(), + deployDir, + ) } - }() - - wgp.Wait() - if has_err { - return errors.New("install cluster failed") - } else { - return nil + // save task by host + deployTasksByHosts[inst.GetManageHost()] = t + }) + + for host, tk := range deployTasksByHosts { + deployCompTasks = append(deployCompTasks, + tk.BuildAsStep(fmt.Sprintf(" - Copy %s -> %s", "required components", host)), + ) } -} -func (d *GeminiInstaller) Close() { - var err error - for _, sftp := range d.sftpClients { - if sftp != nil { - if err = sftp.Close(); err != nil { - fmt.Println(err) - } + // generates certificate for instance and transfers it to the server + //certificateTasks, err := buildCertificateTasks(m, name, topo, metadata.GetBaseMeta(), gOpt, sshProxyProps) + //if err != nil { + // return err + //} + + refreshConfigTasks := buildInitConfigTasks(m, clusterName, topo, metadata.GetBaseMeta(), gOpt) + + builder := task.NewBuilder(m.logger). + Step("+ Generate SSH keys", + task.NewBuilder(m.logger). + SSHKeyGen(m.specManager.Path(clusterName, "ssh", "id_rsa")). + Build(), + m.logger). 
+ ParallelStep("+ Download openGemini components", false, downloadCompTasks...). + ParallelStep("+ Initialize target host environments", false, envInitTasks...). + ParallelStep("+ Deploy openGemini instance", false, deployCompTasks...). + //ParallelStep("+ Copy certificate to remote host", gOpt.Force, certificateTasks...). + ParallelStep("+ Init instance configs", gOpt.Force, refreshConfigTasks...) + //ParallelStep("+ Init monitor configs", gOpt.Force, monitorConfigTasks...) + + t := builder.Build() + + ctx := ctxt.New( + context.Background(), + runtime.NumCPU(), + m.logger, + ) + if err = t.Execute(ctx); err != nil { + if errorx.Cast(err) != nil { + // FIXME: Map possible task errors and give suggestions. + return errors.WithStack(err) } + return errors.WithStack(err) } - for _, ssh := range d.sshClients { - if err = ssh.Close(); err != nil { - fmt.Println(err) - } + // FIXME: remove me if you finish + err = m.specManager.SaveMeta(clusterName, metadata) + if err != nil { + return err } + + hint := color.New(color.FgBlue).Sprintf("%s start %s", "gemix cluster", clusterName) + m.logger.Infof("Cluster `%s` installed successfully, you can start it with command: `%s`\n", clusterName, hint) + return nil } diff --git a/pkg/cluster/manager/install2.go b/pkg/cluster/manager/install2.go index ea7be23..d0aab55 100644 --- a/pkg/cluster/manager/install2.go +++ b/pkg/cluster/manager/install2.go @@ -15,284 +15,347 @@ package manager import ( - "context" + "errors" "fmt" - "os" "path/filepath" - "regexp" - "runtime" - "strings" - - "github.com/fatih/color" - "github.com/joomcode/errorx" - "github.com/openGemini/gemix/pkg/cluster/ctxt" - operator "github.com/openGemini/gemix/pkg/cluster/operation" - "github.com/openGemini/gemix/pkg/cluster/spec" - "github.com/openGemini/gemix/pkg/cluster/task" - "github.com/openGemini/gemix/pkg/gui" - "github.com/pkg/errors" -) + "sync" -var ( - clusterNameRegexp = regexp.MustCompile(`^[a-zA-Z0-9\-_\.]+$`) + 
"github.com/openGemini/gemix/pkg/cluster/config" + "github.com/openGemini/gemix/pkg/cluster/operation" + "github.com/openGemini/gemix/utils" + "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" ) -// InstallOptions contains the options for install. -type InstallOptions struct { - User string // username of login to the SSH server - SkipCreateUser bool // don't create the user - IdentityFile string // path to the private key file - UsePassword bool // use password instead of identity file for ssh connection +type UploadAction struct { + uploadInfo []*config.UploadInfo + remoteHost *config.RemoteHost } -// TODO -// DeployerInstance is an instance can deploy to a target deploy directory. -type DeployerInstance interface { - Deploy(b *task.Builder, srcPath string, deployDir string, version string, name string, clusterVersion string) +type Installer interface { + PrepareForInstall() error + Install() error + Close() } -// ValidateClusterNameOrError validates a cluster name and returns error if the name is invalid. -func ValidateClusterNameOrError(n string) error { - if len(n) == 0 { - return fmt.Errorf("cluster name must not be empty") - } - if !clusterNameRegexp.MatchString(n) { - return fmt.Errorf("cluster name '%s' is invalid. The cluster name should only contain alphabets, numbers, hyphen (-), underscore (_), and dot (.)", n) - } - return nil +type GeminiInstaller struct { + version string + // ip -> remotes + remotes map[string]*config.RemoteHost + uploads map[string]*UploadAction + + // ip -> ssh clients + sshClients map[string]*ssh.Client + sftpClients map[string]*sftp.Client + + configurator config.Configurator // conf reader + executor operation.Executor // execute commands on remote host + + clusterOptions utils.ClusterOptions + + wg sync.WaitGroup } -// Install a cluster. 
-func (m *Manager) Install( - clusterName string, - clusterVersion string, - topoFile string, - opt InstallOptions, - skipConfirm bool, - gOpt operator.Options, -) error { - if err := ValidateClusterNameOrError(clusterName); err != nil { - return errors.WithStack(err) +func NewGeminiInstaller(ops utils.ClusterOptions) Installer { + return &GeminiInstaller{ + remotes: make(map[string]*config.RemoteHost), + uploads: make(map[string]*UploadAction), + sshClients: make(map[string]*ssh.Client), + sftpClients: make(map[string]*sftp.Client), + version: ops.Version, + configurator: config.NewGeminiConfigurator(ops.YamlPath, filepath.Join(utils.DownloadDst, ops.Version, utils.LocalEtcRelPath, utils.LocalConfName), filepath.Join(utils.DownloadDst, ops.Version, utils.LocalEtcRelPath)), + clusterOptions: ops, } +} - exist, err := m.specManager.Exist(clusterName) - if err != nil { - return errors.WithStack(err) +func (d *GeminiInstaller) PrepareForInstall() error { + var err error + if err = d.configurator.BuildConfig(); err != nil { + return err } + conf := d.configurator.GetConfig() - if exist { - return errors.Errorf("cluster name '%s' is duplicated. Please specify another cluster name", clusterName) + dOps := operation.DownloadOptions{ + Version: d.version, + Os: conf.CommonConfig.Os, + Arch: conf.CommonConfig.Arch, + } + downloader := operation.NewGeminiDownloader(dOps) + if err = downloader.Run(); err != nil { + return err } - metadata := m.specManager.NewMetadata() - topo := metadata.GetTopology() - base := topo.BaseTopo() + if err = d.configurator.GenClusterConfs(); err != nil { + return err + } - if err = spec.ParseTopologyYaml(topoFile, topo); err != nil { - return errors.WithStack(err) + // check the internet with all the remote servers + if err = d.prepareRemotes(conf, true); err != nil { + fmt.Printf("Failed to establish SSH connections with all remote servers. 
The specific error is: %s\n", err) + return err } + fmt.Println("Success to establish SSH connections with all remote servers.") - spec.ExpandRelativeDir(topo) + d.executor = operation.NewGeminiExecutor(d.sshClients) - if err = checkConflict(m, clusterName, topo); err != nil { - return errors.WithStack(err) + if err = d.prepareForUpload(); err != nil { + return err } - var ( - sshConnProps *gui.SSHConnectionProps - ) - if sshConnProps, err = gui.ReadIdentityFileOrPassword(opt.IdentityFile, opt.UsePassword); err != nil { - return errors.WithStack(err) + if err = d.prepareUploadActions(conf); err != nil { + return err } - // TODO: Detect CPU Arch Name - if err = m.fillHost(sshConnProps, topo, opt.User); err != nil { - return errors.WithStack(err) + return nil +} + +func (d *GeminiInstaller) prepareRemotes(c *config.Config, needSftp bool) error { + if c == nil { + return utils.ErrUnexpectedNil } - if !skipConfirm { - if err = m.confirmTopology(clusterName, clusterVersion, topo); err != nil { - return errors.WithStack(err) + for ip, ssh := range c.SSHConfig { + d.remotes[ip] = &config.RemoteHost{ + Ip: ip, + SSHPort: ssh.Port, + UpDataPath: ssh.UpDataPath, + LogPath: ssh.LogPath, + User: d.clusterOptions.User, + Typ: d.clusterOptions.SshType, + Password: d.clusterOptions.Password, + KeyPath: d.clusterOptions.Key, } } - if err = os.MkdirAll(m.specManager.Path(clusterName), 0750); err != nil { - return errorx.InitializationFailed. - Wrap(err, "Failed to create cluster metadata directory '%s'", m.specManager.Path(clusterName)). 
- WithProperty(gui.SuggestionFromString("Please check file system permissions and try again.")) + if err := d.tryConnect(c, needSftp); err != nil { + return err } - var ( - envInitTasks []*task.StepDisplay // tasks which are used to initialize environment - downloadCompTasks []*task.StepDisplay // tasks which are used to download components - deployCompTasks []*task.StepDisplay // tasks which are used to copy components to remote host - ) - - // Initialize environment - - globalOptions := base.GlobalOptions - metadata.SetUser(globalOptions.User) - metadata.SetVersion(clusterVersion) + return nil +} - // generate CA and client cert for TLS enabled cluster - //_, err = m.genAndSaveCertificate(clusterName, globalOptions) - //if err != nil { - // return err - //} +func (d *GeminiInstaller) tryConnect(c *config.Config, needSftp bool) error { + for ip, r := range d.remotes { + var err error + var sshClient *ssh.Client + switch r.Typ { + case utils.SSH_PW: + sshClient, err = utils.NewSSH_PW(r.User, r.Password, r.Ip, r.SSHPort) + case utils.SSH_KEY: + sshClient, err = utils.NewSSH_Key(r.User, r.KeyPath, r.Ip, r.SSHPort) - uniqueHosts := getAllUniqueHosts(topo) + } + if err != nil { + return err + } + d.sshClients[ip] = sshClient - for host, info := range uniqueHosts { - var dirs []string - for _, dir := range []string{globalOptions.DeployDir, globalOptions.LogDir} { - if dir == "" { - continue + if needSftp { + sftpClient, err := utils.NewSftpClient(sshClient) + if err != nil { + return err } + d.sftpClients[ip] = sftpClient - dirs = append(dirs, spec.Abs(globalOptions.User, dir)) + pwd, _ := sftpClient.Getwd() + // Convert relative paths to absolute paths. 
+ if len(r.UpDataPath) > 1 && r.UpDataPath[:1] == "~" { + r.UpDataPath = filepath.Join(pwd, r.UpDataPath[1:]) + } } - // the default, relative path of data dir is under deploy dir - if strings.HasPrefix(globalOptions.DataDir, "/") { - dirs = append(dirs, globalOptions.DataDir) + } + if needSftp { + for _, host := range c.CommonConfig.MetaHosts { + pwd, _ := d.sftpClients[host].Getwd() + confPath := filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteMetaConfName) + hostToml, _ := config.ReadFromToml(confPath) + // Convert relative paths in openGemini.conf to absolute paths. + hostToml = config.ConvertToml(hostToml, pwd) + if err := config.GenNewToml(hostToml, confPath); err != nil { + return err + } + } + for _, host := range c.CommonConfig.SqlHosts { + pwd, _ := d.sftpClients[host].Getwd() + confPath := filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteSqlConfName) + hostToml, _ := config.ReadFromToml(confPath) + // Convert relative paths in openGemini.conf to absolute paths. + hostToml = config.ConvertToml(hostToml, pwd) + if err := config.GenNewToml(hostToml, confPath); err != nil { + return err + } + } + for _, host := range c.CommonConfig.StoreHosts { + pwd, _ := d.sftpClients[host].Getwd() + confPath := filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteStoreConfName) + hostToml, _ := config.ReadFromToml(confPath) + // Convert relative paths in openGemini.conf to absolute paths. + hostToml = config.ConvertToml(hostToml, pwd) + if err := config.GenNewToml(hostToml, confPath); err != nil { + return err + } } - - t := task.NewBuilder(m.logger). - RootSSH( - host, - info.ssh, - opt.User, - sshConnProps.Password, - sshConnProps.IdentityFile, - sshConnProps.IdentityFilePassphrase, - gOpt.SSHTimeout, - gOpt.OptTimeout, - ). - UserAction(host, globalOptions.User, globalOptions.Group, opt.SkipCreateUser || globalOptions.User == opt.User). 
- EnvInit(host, globalOptions.User, globalOptions.Group). - Mkdir(globalOptions.User, host, dirs...). - BuildAsStep(fmt.Sprintf(" - Prepare %s:%d", host, info.ssh)) - envInitTasks = append(envInitTasks, t) } + return nil +} - // Download missing component - downloadCompTasks = buildDownloadCompTasks(clusterVersion, topo) - - var deployTasksByHosts = make(map[string]*task.Builder, len(uniqueHosts)) - // Deploy components to remote - topo.IterInstance(func(inst spec.Instance) { - deployDir := spec.Abs(globalOptions.User, inst.DeployDir()) - // data dir would be empty for components which don't need it - dataDirs := spec.Abs(globalOptions.User, inst.DataDir()) - // log dir will always be with values, but might not be used by the component - logDir := spec.Abs(globalOptions.User, inst.LogDir()) - // Deploy component - // prepare deployment server - deployDirs := []string{ - deployDir, logDir, - filepath.Join(deployDir, "bin"), - filepath.Join(deployDir, "conf"), - filepath.Join(deployDir, "scripts"), +func (d *GeminiInstaller) prepareForUpload() error { + if d.executor == nil { + return utils.ErrUnexpectedNil + } + for ip, r := range d.remotes { + binPath := filepath.Join(r.UpDataPath, d.version, utils.RemoteBinRelPath) + etcPath := filepath.Join(r.UpDataPath, d.version, utils.RemoteEtcRelPath) + command := fmt.Sprintf("mkdir -p %s; mkdir -p %s;", binPath, etcPath) + if _, err := d.executor.ExecCommand(ip, command); err != nil { + return err } + } + return nil +} - tk, ok := deployTasksByHosts[inst.GetHost()] - if ok { - tk = tk.CopyComponent( - inst.ComponentSource(), - inst.ComponentName(), - inst.OS(), - inst.Arch(), - clusterVersion, - "", // use default srcPath - inst.GetManageHost(), - deployDir, - ) - deployTasksByHosts[inst.GetManageHost()] = tk - return +func (d *GeminiInstaller) prepareUploadActions(c *config.Config) error { + // ts-meta(bin and config files) + for _, host := range c.CommonConfig.MetaHosts { + if d.uploads[host] == nil { + d.uploads[host] = 
&UploadAction{ + remoteHost: d.remotes[host], + } } + d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ + LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalBinRelPath, utils.TsMeta), + RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteBinRelPath), + FileName: utils.TsMeta, + }) + d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ + LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteMetaConfName), + RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteEtcRelPath), + FileName: utils.RemoteMetaConfName, + }) + } - t := task.NewBuilder(m.logger). // TODO: only support root deploy user - RootSSH( - inst.GetManageHost(), - inst.GetSSHPort(), - globalOptions.User, - sshConnProps.Password, - sshConnProps.IdentityFile, - sshConnProps.IdentityFilePassphrase, - gOpt.SSHTimeout, - gOpt.OptTimeout, - ). - //t := task.NewSimpleUerSSH(m.logger, inst.GetManageHost(), inst.GetSSHPort(), globalOptions.User, 0, 0). - Mkdir(globalOptions.User, inst.GetManageHost(), deployDirs...). 
- Mkdir(globalOptions.User, inst.GetManageHost(), dataDirs) - - if deployerInstance, ok := inst.(DeployerInstance); ok { - deployerInstance.Deploy(t, "", deployDir, clusterVersion, clusterName, clusterVersion) - } else { - // copy dependency component if needed - t = t.CopyComponent( - inst.ComponentSource(), - inst.ComponentName(), - inst.OS(), - inst.Arch(), - clusterVersion, - "", // use default srcPath - inst.GetManageHost(), - deployDir, - ) + // ts-sql(bin and config files) + for _, host := range c.CommonConfig.SqlHosts { + if d.uploads[host] == nil { + d.uploads[host] = &UploadAction{ + remoteHost: d.remotes[host], + } } - // save task by host - deployTasksByHosts[inst.GetManageHost()] = t - }) - - for host, tk := range deployTasksByHosts { - deployCompTasks = append(deployCompTasks, - tk.BuildAsStep(fmt.Sprintf(" - Copy %s -> %s", "required components", host)), - ) + d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ + LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalBinRelPath, utils.TsSql), + RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteBinRelPath), + FileName: utils.TsSql, + }) + d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ + LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteSqlConfName), + RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteEtcRelPath), + FileName: utils.RemoteSqlConfName, + }) } - // generates certificate for instance and transfers it to the server - //certificateTasks, err := buildCertificateTasks(m, name, topo, metadata.GetBaseMeta(), gOpt, sshProxyProps) - //if err != nil { - // return err - //} - - refreshConfigTasks := buildInitConfigTasks(m, clusterName, topo, metadata.GetBaseMeta(), gOpt) - - builder := task.NewBuilder(m.logger). - Step("+ Generate SSH keys", - task.NewBuilder(m.logger). 
- SSHKeyGen(m.specManager.Path(clusterName, "ssh", "id_rsa")). - Build(), - m.logger). - ParallelStep("+ Download openGemini components", false, downloadCompTasks...). - ParallelStep("+ Initialize target host environments", false, envInitTasks...). - ParallelStep("+ Deploy openGemini instance", false, deployCompTasks...). - //ParallelStep("+ Copy certificate to remote host", gOpt.Force, certificateTasks...). - ParallelStep("+ Init instance configs", gOpt.Force, refreshConfigTasks...) - //ParallelStep("+ Init monitor configs", gOpt.Force, monitorConfigTasks...) - - t := builder.Build() - - ctx := ctxt.New( - context.Background(), - runtime.NumCPU(), - m.logger, - ) - if err = t.Execute(ctx); err != nil { - if errorx.Cast(err) != nil { - // FIXME: Map possible task errors and give suggestions. - return errors.WithStack(err) + // ts-store(bin and config files) + for _, host := range c.CommonConfig.StoreHosts { + if d.uploads[host] == nil { + d.uploads[host] = &UploadAction{ + remoteHost: d.remotes[host], + } } - return errors.WithStack(err) + d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ + LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalBinRelPath, utils.TsStore), + RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteBinRelPath), + FileName: utils.TsStore, + }) + d.uploads[host].uploadInfo = append(d.uploads[host].uploadInfo, &config.UploadInfo{ + LocalPath: filepath.Join(utils.DownloadDst, d.version, utils.LocalEtcRelPath, host, utils.RemoteStoreConfName), + RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteEtcRelPath), + FileName: utils.RemoteStoreConfName, + }) } - // FIXME: remove me if you finish - err = m.specManager.SaveMeta(clusterName, metadata) - if err != nil { - return err + // script + for host := range c.SSHConfig { + if d.uploads[host] == nil { + d.uploads[host] = &UploadAction{ + remoteHost: d.remotes[host], + } + } + d.uploads[host].uploadInfo = 
append(d.uploads[host].uploadInfo, &config.UploadInfo{ + LocalPath: utils.InstallScriptPath, + RemotePath: filepath.Join(d.remotes[host].UpDataPath, d.version, utils.RemoteEtcRelPath), + FileName: utils.InstallScript, + }) } - hint := color.New(color.FgBlue).Sprintf("%s start %s", "gemix cluster", clusterName) - fmt.Printf("Cluster `%s` deployed successfully, you can start it with command: `%s`\n", clusterName, hint) return nil } + +func (d *GeminiInstaller) Install() error { + fmt.Println("Start to install openGemini...") + errChan := make(chan error, len(d.uploads)) + var wgp sync.WaitGroup + wgp.Add(2) + + go func() { + defer wgp.Done() + d.wg.Add(len(d.uploads)) + for ip, action := range d.uploads { + go func(ip string, action *UploadAction, errChan chan error) { + defer d.wg.Done() + for _, c := range action.uploadInfo { + // check whether need to upload the file + // only support Linux + cmd := fmt.Sprintf("if [ -f %s ]; then echo 'File exists'; else echo 'File not found'; fi", filepath.Join(c.RemotePath, c.FileName)) + output, err := d.executor.ExecCommand(ip, cmd) + if string(output) == "File exists\n" && err == nil { + fmt.Printf("%s exists on %s.\n", c.FileName, c.RemotePath) + } else { + if err := utils.UploadFile(action.remoteHost.Ip, c.LocalPath, c.RemotePath, d.sftpClients[action.remoteHost.Ip]); err != nil { + fmt.Printf("upload %s to %s error: %v\n", c.LocalPath, action.remoteHost.Ip, err) + errChan <- err + } + } + } + + }(ip, action, errChan) + } + d.wg.Wait() + close(errChan) + }() + + var has_err = false + go func() { + defer wgp.Done() + for { + err, ok := <-errChan + if !ok { + break + } + fmt.Println(err) + has_err = true + } + }() + + wgp.Wait() + if has_err { + return errors.New("install cluster failed") + } else { + return nil + } +} + +func (d *GeminiInstaller) Close() { + var err error + for _, sftp := range d.sftpClients { + if sftp != nil { + if err = sftp.Close(); err != nil { + fmt.Println(err) + } + } + } + + for _, ssh := range 
d.sshClients { + if err = ssh.Close(); err != nil { + fmt.Println(err) + } + } +} diff --git a/pkg/cluster/manager/manager.go b/pkg/cluster/manager/manager.go index b9c37ac..7a1f75b 100644 --- a/pkg/cluster/manager/manager.go +++ b/pkg/cluster/manager/manager.go @@ -23,8 +23,8 @@ import ( "github.com/openGemini/gemix/pkg/cluster/spec" "github.com/openGemini/gemix/pkg/cluster/task" "github.com/openGemini/gemix/pkg/gui" + logprinter "github.com/openGemini/gemix/pkg/logger/printer" "github.com/pkg/errors" - "go.uber.org/zap" ) // Manager to deploy a cluster. @@ -32,11 +32,11 @@ type Manager struct { sysName string specManager *spec.SpecManager //bindVersion spec.BindVersion - logger *zap.Logger + logger *logprinter.Logger } // NewManager create a Manager. -func NewManager(sysName string, specManager *spec.SpecManager, logger *zap.Logger) *Manager { +func NewManager(sysName string, specManager *spec.SpecManager, logger *logprinter.Logger) *Manager { return &Manager{ sysName: sysName, specManager: specManager, diff --git a/pkg/cluster/manager/start.go b/pkg/cluster/manager/start.go index 02569dc..aa9a8f2 100644 --- a/pkg/cluster/manager/start.go +++ b/pkg/cluster/manager/start.go @@ -31,13 +31,12 @@ import ( "github.com/openGemini/gemix/pkg/cluster/task" "github.com/openGemini/gemix/utils" "github.com/pkg/errors" - "go.uber.org/zap" "golang.org/x/crypto/ssh" ) // StartCluster start the cluster with specified name. 
func (m *Manager) StartCluster(name string, gOpt operation.Options, fn ...func(b *task.Builder, metadata spec.Metadata)) error { - m.logger.Info("Starting cluster ...", zap.String("cluster name", name)) + m.logger.Infof("Starting cluster %s...", name) // check locked //if err := m.specManager.ScaleOutLockedErr(name); err != nil { @@ -85,7 +84,7 @@ func (m *Manager) StartCluster(name string, gOpt operation.Options, fn ...func(b return errors.WithStack(err) } - m.logger.Info("Started cluster successfully", zap.String("cluster name", name)) + m.logger.Infof("Started cluster `%s` successfully", name) return nil } diff --git a/pkg/cluster/manager/stop.go b/pkg/cluster/manager/stop.go index a27e843..2d4b93f 100644 --- a/pkg/cluster/manager/stop.go +++ b/pkg/cluster/manager/stop.go @@ -87,6 +87,6 @@ func (m *Manager) StopCluster( return errors.WithStack(err) } - m.logger.Info(fmt.Sprintf("Stopped cluster `%s` successfully", name)) + m.logger.Infof(fmt.Sprintf("Stopped cluster `%s` successfully", name)) return nil } diff --git a/pkg/cluster/manager/uninstall.go b/pkg/cluster/manager/uninstall.go index 6141add..6b12f41 100644 --- a/pkg/cluster/manager/uninstall.go +++ b/pkg/cluster/manager/uninstall.go @@ -1,4 +1,4 @@ -// Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd. +// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -8,182 +8,81 @@ // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( - "errors" + "context" "fmt" - "os" - "path/filepath" - "sync" - "github.com/openGemini/gemix/pkg/cluster/config" - "github.com/openGemini/gemix/pkg/cluster/operation" - "github.com/openGemini/gemix/utils" - "golang.org/x/crypto/ssh" + "github.com/fatih/color" + "github.com/openGemini/gemix/pkg/cluster/ctxt" + operator "github.com/openGemini/gemix/pkg/cluster/operation" + "github.com/openGemini/gemix/pkg/gui" + "github.com/pkg/errors" ) -type Uninstall interface { - Prepare() error - Run() error - Close() -} - -type GeminiUninstaller struct { - // ip -> remotes - remotes map[string]*config.RemoteHost - // ip -> ssh clients - sshClients map[string]*ssh.Client - - configurator config.Configurator // conf reader - executor operation.Executor // execute commands on remote host - upDataPath map[string]string // ip->up path - - wg sync.WaitGroup - - clusterOptions utils.ClusterOptions -} - -func NewGeminiUninstaller(ops utils.ClusterOptions) Uninstall { - new := &GeminiUninstaller{ - remotes: make(map[string]*config.RemoteHost), - sshClients: make(map[string]*ssh.Client), - configurator: config.NewGeminiConfigurator(ops.YamlPath, "", ""), - upDataPath: make(map[string]string), - clusterOptions: ops, - } - return new -} - -func (s *GeminiUninstaller) Prepare() error { - var err error - if err = s.configurator.BuildConfig(); err != nil { +// UninstallCluster destroy the cluster. +func (m *Manager) UninstallCluster(name string, gOpt operator.Options, destroyOpt operator.Options, skipConfirm bool) error { + if err := ValidateClusterNameOrError(name); err != nil { return err } - conf := s.configurator.GetConfig() - if err = s.prepareRemotes(conf); err != nil { - fmt.Printf("Failed to establish SSH connections with all remote servers. 
The specific error is: %s\n", err) + metadata, err := m.meta(name) + if err != nil { return err } - fmt.Println("Success to establish SSH connections with all remote servers.") - s.executor = operation.NewGeminiExecutor(s.sshClients) - - return nil -} - -func (s *GeminiUninstaller) prepareRemotes(c *config.Config) error { - if c == nil { - return utils.ErrUnexpectedNil - } - - for ip, ssh := range c.SSHConfig { - s.remotes[ip] = &config.RemoteHost{ - Ip: ip, - SSHPort: ssh.Port, - User: s.clusterOptions.User, - Password: s.clusterOptions.Password, - KeyPath: s.clusterOptions.Key, - Typ: s.clusterOptions.SshType, + topo := metadata.GetTopology() + base := metadata.GetBaseMeta() + + if !skipConfirm { + fmt.Println(color.HiRedString(gui.ASCIIArtWarning)) + if err := gui.PromptForAnswerOrAbortError( + "Yes, I know my cluster and data will be deleted.", + fmt.Sprintf("This operation will destroy %s %s cluster %s and its data.", + m.sysName, + color.HiYellowString(base.Version), + color.HiYellowString(name), + )+"\nAre you sure to continue?", + ); err != nil { + return err } - - s.upDataPath[ip] = ssh.UpDataPath + m.logger.Infof("Destroying cluster...") } - if err := s.tryConnect(); err != nil { + b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) + if err != nil { return err } - - return nil -} - -func (s *GeminiUninstaller) tryConnect() error { - for ip, r := range s.remotes { - var err error - var sshClient *ssh.Client - switch r.Typ { - case utils.SSH_PW: - sshClient, err = utils.NewSSH_PW(r.User, r.Password, r.Ip, r.SSHPort) - case utils.SSH_KEY: - sshClient, err = utils.NewSSH_Key(r.User, r.KeyPath, r.Ip, r.SSHPort) - - } - if err != nil { - return err - } - s.sshClients[ip] = sshClient - } - return nil -} - -func (s *GeminiUninstaller) Run() error { - if s.executor == nil { - return utils.ErrUnexpectedNil + t := b. 
+ Func("StopCluster", func(ctx context.Context) error { + return operator.Stop( + ctx, + topo, + operator.Options{Force: destroyOpt.Force}, + ) + }). + Func("UninstallCluster", func(ctx context.Context) error { + return operator.Destroy(ctx, topo, destroyOpt) + }). + Build() + + ctx := ctxt.New( + context.Background(), + gOpt.Concurrency, + m.logger, + ) + if err = t.Execute(ctx); err != nil { + return errors.WithStack(err) } - errChan := make(chan error, len(s.remotes)) - var wgp sync.WaitGroup - wgp.Add(2) - - go func() { - defer wgp.Done() - s.wg.Add(len(s.remotes)) - for ip := range s.remotes { - go func(ip string, errChan chan error) { - defer s.wg.Done() - filePath := filepath.Join(s.upDataPath[ip], s.clusterOptions.Version) - if filePath == "/" || filePath == "/root" { - errChan <- fmt.Errorf("can not remove %s on %s", filePath, ip) - return - } - command := fmt.Sprintf("rm -rf %s;", filePath) - _, err := s.executor.ExecCommand(ip, command) - if err != nil { - errChan <- err - } - }(ip, errChan) - } - s.wg.Wait() - close(errChan) - }() - - var has_err = false - go func() { - defer wgp.Done() - for { - err, ok := <-errChan - if !ok { - break - } - fmt.Println(err) - has_err = true - } - }() - - wgp.Wait() - if has_err { - return errors.New("uninstall cluster failed") - } else { - err := os.Remove(filepath.Join(utils.ClusterInfoDir, s.clusterOptions.Name)) - if err != nil { - return errors.New("error deleting the cluster info file") - } - return nil + if err = m.specManager.Remove(name); err != nil { + return errors.WithStack(err) } -} -func (s *GeminiUninstaller) Close() { - var err error - for _, ssh := range s.sshClients { - if ssh != nil { - if err = ssh.Close(); err != nil { - fmt.Println(err) - } - } - } + m.logger.Infof("Uninstall cluster `%s` successfully", name) + return nil } diff --git a/pkg/cluster/manager/uninstall2.go b/pkg/cluster/manager/uninstall2.go new file mode 100644 index 0000000..6141add --- /dev/null +++ 
b/pkg/cluster/manager/uninstall2.go @@ -0,0 +1,189 @@ +// Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manager + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/openGemini/gemix/pkg/cluster/config" + "github.com/openGemini/gemix/pkg/cluster/operation" + "github.com/openGemini/gemix/utils" + "golang.org/x/crypto/ssh" +) + +type Uninstall interface { + Prepare() error + Run() error + Close() +} + +type GeminiUninstaller struct { + // ip -> remotes + remotes map[string]*config.RemoteHost + // ip -> ssh clients + sshClients map[string]*ssh.Client + + configurator config.Configurator // conf reader + executor operation.Executor // execute commands on remote host + upDataPath map[string]string // ip->up path + + wg sync.WaitGroup + + clusterOptions utils.ClusterOptions +} + +func NewGeminiUninstaller(ops utils.ClusterOptions) Uninstall { + new := &GeminiUninstaller{ + remotes: make(map[string]*config.RemoteHost), + sshClients: make(map[string]*ssh.Client), + configurator: config.NewGeminiConfigurator(ops.YamlPath, "", ""), + upDataPath: make(map[string]string), + clusterOptions: ops, + } + return new +} + +func (s *GeminiUninstaller) Prepare() error { + var err error + if err = s.configurator.BuildConfig(); err != nil { + return err + } + conf := s.configurator.GetConfig() + + if err = s.prepareRemotes(conf); err != nil { + fmt.Printf("Failed to establish SSH 
connections with all remote servers. The specific error is: %s\n", err) + return err + } + fmt.Println("Success to establish SSH connections with all remote servers.") + + s.executor = operation.NewGeminiExecutor(s.sshClients) + + return nil +} + +func (s *GeminiUninstaller) prepareRemotes(c *config.Config) error { + if c == nil { + return utils.ErrUnexpectedNil + } + + for ip, ssh := range c.SSHConfig { + s.remotes[ip] = &config.RemoteHost{ + Ip: ip, + SSHPort: ssh.Port, + User: s.clusterOptions.User, + Password: s.clusterOptions.Password, + KeyPath: s.clusterOptions.Key, + Typ: s.clusterOptions.SshType, + } + + s.upDataPath[ip] = ssh.UpDataPath + } + + if err := s.tryConnect(); err != nil { + return err + } + + return nil +} + +func (s *GeminiUninstaller) tryConnect() error { + for ip, r := range s.remotes { + var err error + var sshClient *ssh.Client + switch r.Typ { + case utils.SSH_PW: + sshClient, err = utils.NewSSH_PW(r.User, r.Password, r.Ip, r.SSHPort) + case utils.SSH_KEY: + sshClient, err = utils.NewSSH_Key(r.User, r.KeyPath, r.Ip, r.SSHPort) + + } + if err != nil { + return err + } + s.sshClients[ip] = sshClient + } + return nil +} + +func (s *GeminiUninstaller) Run() error { + if s.executor == nil { + return utils.ErrUnexpectedNil + } + + errChan := make(chan error, len(s.remotes)) + var wgp sync.WaitGroup + wgp.Add(2) + + go func() { + defer wgp.Done() + s.wg.Add(len(s.remotes)) + for ip := range s.remotes { + go func(ip string, errChan chan error) { + defer s.wg.Done() + filePath := filepath.Join(s.upDataPath[ip], s.clusterOptions.Version) + if filePath == "/" || filePath == "/root" { + errChan <- fmt.Errorf("can not remove %s on %s", filePath, ip) + return + } + command := fmt.Sprintf("rm -rf %s;", filePath) + _, err := s.executor.ExecCommand(ip, command) + if err != nil { + errChan <- err + } + }(ip, errChan) + } + s.wg.Wait() + close(errChan) + }() + + var has_err = false + go func() { + defer wgp.Done() + for { + err, ok := <-errChan + if !ok { + 
break + } + fmt.Println(err) + has_err = true + } + }() + + wgp.Wait() + if has_err { + return errors.New("uninstall cluster failed") + } else { + err := os.Remove(filepath.Join(utils.ClusterInfoDir, s.clusterOptions.Name)) + if err != nil { + return errors.New("error deleting the cluster info file") + } + return nil + } +} + +func (s *GeminiUninstaller) Close() { + var err error + for _, ssh := range s.sshClients { + if ssh != nil { + if err = ssh.Close(); err != nil { + fmt.Println(err) + } + } + } +} diff --git a/pkg/cluster/module/shell.go b/pkg/cluster/module/shell.go new file mode 100644 index 0000000..c765667 --- /dev/null +++ b/pkg/cluster/module/shell.go @@ -0,0 +1,65 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package module + +import ( + "context" + "fmt" + + "github.com/openGemini/gemix/pkg/cluster/ctxt" +) + +const ( + defaultShell = "/bin/bash" +) + +// ShellModuleConfig is the configuration used to initialize a ShellModule +type ShellModuleConfig struct { + Command string // the command to run + Sudo bool // whether to use root privilege to run the command + Chdir string // change working directory before running the command + UseShell bool // whether to use shell to invoke the command +} + +// ShellModule is the module used to run a shell command on the remote host +type ShellModule struct { + cmd string // the built command + sudo bool +} + +// NewShellModule builds and returns a ShellModule object based on the given config. 
+func NewShellModule(config ShellModuleConfig) *ShellModule { + cmd := config.Command + + if config.Chdir != "" { + cmd = fmt.Sprintf("cd %s && %s", + config.Chdir, cmd) + } + + if config.UseShell { + cmd = fmt.Sprintf("%s -c '%s'", + defaultShell, cmd) + } + + return &ShellModule{ + cmd: cmd, + sudo: config.Sudo, + } +} + +// Execute passes the command to executor and returns its results, the executor +// should be already initialized. +func (mod *ShellModule) Execute(ctx context.Context, exec ctxt.Executor) ([]byte, []byte, error) { + return exec.Execute(ctx, mod.cmd, mod.sudo) +} diff --git a/pkg/cluster/operation/action.go b/pkg/cluster/operation/action.go index ac66f55..5593619 100644 --- a/pkg/cluster/operation/action.go +++ b/pkg/cluster/operation/action.go @@ -26,7 +26,6 @@ import ( logprinter "github.com/openGemini/gemix/pkg/logger/printer" "github.com/openGemini/gemix/pkg/set" "github.com/pkg/errors" - "go.uber.org/zap" "golang.org/x/sync/errgroup" "golang.org/x/text/cases" "golang.org/x/text/language" @@ -118,28 +117,28 @@ func Stop( func enableInstance(ctx context.Context, ins spec.Instance, timeout uint64, isEnable bool) error { e := ctxt.GetInner(ctx).Get(ins.GetManageHost()) - logger := ctx.Value(logprinter.ContextKeyLogger).(*zap.Logger) + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) action := "disable" if isEnable { action = "enable" } - logger.Info(fmt.Sprintf("\t%s instance %s", actionPrevMsgs[action], ins.ID())) + logger.Infof("\t%s instance %s", actionPrevMsgs[action], ins.ID()) // Enable/Disable by systemd. 
if err := systemctl(ctx, e, ins.ServiceName(), action, timeout); err != nil { return toFailedActionError(err, action, ins.GetManageHost(), ins.ServiceName(), ins.LogDir()) } - logger.Info(fmt.Sprintf("\t%s instance %s success", actionPostMsgs[action], ins.ID())) + logger.Infof("\t%s instance %s success", actionPostMsgs[action], ins.ID()) return nil } func startInstance(ctx context.Context, ins spec.Instance, timeout uint64, tlsCfg *tls.Config) error { e := ctxt.GetInner(ctx).Get(ins.GetManageHost()) - logger := ctx.Value(logprinter.ContextKeyLogger).(*zap.Logger) - logger.Info(fmt.Sprintf("\tStarting instance %s", ins.ID())) + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) + logger.Infof("\tStarting instance %s", ins.ID()) if err := systemctl(ctx, e, ins.ServiceName(), "start", timeout); err != nil { return toFailedActionError(err, "start", ins.GetManageHost(), ins.ServiceName(), ins.LogDir()) @@ -150,13 +149,13 @@ func startInstance(ctx context.Context, ins spec.Instance, timeout uint64, tlsCf return toFailedActionError(err, "start", ins.GetManageHost(), ins.ServiceName(), ins.LogDir()) } - logger.Info(fmt.Sprintf("\tStart instance %s success", ins.ID())) + logger.Infof("\tStart instance %s success", ins.ID()) return nil } func systemctl(ctx context.Context, executor ctxt.Executor, service string, action string, timeout uint64) error { - logger := ctx.Value(logprinter.ContextKeyLogger).(*zap.Logger) + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) c := module.SystemdModuleConfig{ Unit: service, ReloadDaemon: true, @@ -170,7 +169,7 @@ func systemctl(ctx context.Context, executor ctxt.Executor, service string, acti fmt.Println(string(stdout)) } if len(stderr) > 0 && !bytes.Contains(stderr, []byte("Created symlink ")) && !bytes.Contains(stderr, []byte("Removed symlink ")) { - logger.Error(string(stderr)) + logger.Errorf(string(stderr)) } if len(stderr) > 0 && action == "stop" { // ignore "unit not loaded" error, as this 
means the unit is not @@ -178,10 +177,10 @@ func systemctl(ctx context.Context, executor ctxt.Executor, service string, acti // NOTE: there will be a potential bug if the unit name is set // wrong and the real unit still remains started. if bytes.Contains(stderr, []byte(" not loaded.")) { - logger.Warn(string(stderr)) + logger.Warnf(string(stderr)) return nil // reset the error to avoid exiting } - logger.Error(string(stderr)) + logger.Errorf(string(stderr)) } return err } @@ -192,12 +191,12 @@ func EnableComponent(ctx context.Context, instances []spec.Instance, noAgentHost return nil } - logger := ctx.Value(logprinter.ContextKeyLogger).(*zap.Logger) + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) name := instances[0].ComponentName() if isEnable { - logger.Info(fmt.Sprintf("Enabling component %s", name)) + logger.Infof(fmt.Sprintf("Enabling component %s", name)) } else { - logger.Info(fmt.Sprintf("Disabling component %s", name)) + logger.Infof(fmt.Sprintf("Disabling component %s", name)) } errg, _ := errgroup.WithContext(ctx) @@ -223,9 +222,9 @@ func StartComponent(ctx context.Context, instances []spec.Instance, options Opti return nil } - logger := ctx.Value(logprinter.ContextKeyLogger).(*zap.Logger) + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) name := instances[0].ComponentName() - logger.Info(fmt.Sprintf("Starting component %s", name)) + logger.Infof(fmt.Sprintf("Starting component %s", name)) errg, _ := errgroup.WithContext(ctx) for _, ins := range instances { @@ -244,14 +243,14 @@ func StartComponent(ctx context.Context, instances []spec.Instance, options Opti func stopInstance(ctx context.Context, ins spec.Instance, timeout uint64) error { e := ctxt.GetInner(ctx).Get(ins.GetManageHost()) - logger := ctx.Value(logprinter.ContextKeyLogger).(*zap.Logger) - logger.Info(fmt.Sprintf("\tStopping instance %s", ins.GetManageHost())) + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) + 
logger.Infof("\tStopping instance %s", ins.GetManageHost()) if err := systemctl(ctx, e, ins.ServiceName(), "stop", timeout); err != nil { return toFailedActionError(err, "stop", ins.GetManageHost(), ins.ServiceName(), ins.LogDir()) } - logger.Info(fmt.Sprintf("\tStop %s %s success", ins.ComponentName(), ins.ID())) + logger.Infof("\tStop %s %s success", ins.ComponentName(), ins.ID()) return nil } @@ -265,9 +264,9 @@ func StopComponent(ctx context.Context, return nil } - logger := ctx.Value(logprinter.ContextKeyLogger).(*zap.Logger) + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) name := instances[0].ComponentName() - logger.Info(fmt.Sprintf("Stopping component %s", name)) + logger.Infof("Stopping component %s", name) errg, _ := errgroup.WithContext(ctx) @@ -305,12 +304,12 @@ func executeSSHCommand(ctx context.Context, action, host, command string) error if !found { return fmt.Errorf("no executor") } - logger := ctx.Value(logprinter.ContextKeyLogger).(*zap.Logger) - logger.Info(fmt.Sprintf("\t%s on %s", action, host)) + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) + logger.Infof("\t%s on %s", action, host) stdout, stderr, err := e.Execute(ctx, command, false) if err != nil { return errors.WithMessagef(err, "stderr: %s", string(stderr)) } - logger.Info(fmt.Sprintf("\t%s", stdout)) + logger.Infof("\t%s", stdout) return nil } diff --git a/pkg/cluster/operation/uninstall.go b/pkg/cluster/operation/uninstall.go new file mode 100644 index 0000000..c90a328 --- /dev/null +++ b/pkg/cluster/operation/uninstall.go @@ -0,0 +1,288 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package operation + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/fatih/color" + "github.com/openGemini/gemix/pkg/cluster/ctxt" + "github.com/openGemini/gemix/pkg/cluster/executor" + "github.com/openGemini/gemix/pkg/cluster/module" + "github.com/openGemini/gemix/pkg/cluster/spec" + logprinter "github.com/openGemini/gemix/pkg/logger/printer" + "github.com/openGemini/gemix/pkg/set" + "github.com/pkg/errors" +) + +// Destroy the cluster. +func Destroy( + ctx context.Context, + cluster spec.Topology, + options Options, +) error { + coms := cluster.ComponentsByStopOrder() + + instCount := map[string]int{} + cluster.IterInstance(func(inst spec.Instance) { + instCount[inst.GetManageHost()]++ + }) + + for _, com := range coms { + insts := com.Instances() + err := DestroyComponent(ctx, insts, cluster, options) + if err != nil && !options.Force { + return errors.WithMessagef(err, "failed to destroy %s", com.Name()) + } + } + + gOpts := cluster.BaseTopo().GlobalOptions + + // Delete all global deploy directory + for host := range instCount { + if err := DeleteGlobalDirs(ctx, host, gOpts); err != nil { + return nil + } + } + + // after all things done, try to remove SSH public key + for host := range instCount { + if err := DeletePublicKey(ctx, host); err != nil { + return nil + } + } + + return nil +} + +// DeleteGlobalDirs deletes all global directories if they are empty +func DeleteGlobalDirs(ctx context.Context, host string, options *spec.GlobalOptions) error { + if options == nil { + return nil + } + + e := ctxt.GetInner(ctx).Get(host) + logger := 
ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) + logger.Infof("Clean global directories %s", host) + for _, dir := range []string{options.LogDir, options.DeployDir, options.DataDir} { + if dir == "" { + continue + } + dir = spec.Abs(options.User, dir) + + logger.Infof("\tClean directory %s on instance %s", dir, host) + + c := module.ShellModuleConfig{ + Command: fmt.Sprintf("rmdir %s > /dev/null 2>&1 || true", dir), + Chdir: "", + UseShell: false, + } + shell := module.NewShellModule(c) + stdout, stderr, err := shell.Execute(ctx, e) + + if len(stdout) > 0 { + fmt.Println(string(stdout)) + } + if len(stderr) > 0 { + logger.Errorf(string(stderr)) + } + + if err != nil { + return errors.WithMessagef(err, "failed to clean directory %s on: %s", dir, host) + } + } + + logger.Infof("Clean global directories %s success", host) + return nil +} + +// DeletePublicKey deletes the SSH public key from host +func DeletePublicKey(ctx context.Context, host string) error { + e := ctxt.GetInner(ctx).Get(host) + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) + logger.Infof("Delete public key %s", host) + _, pubKeyPath := ctxt.GetInner(ctx).GetSSHKeySet() + publicKey, err := os.ReadFile(pubKeyPath) + if err != nil { + return errors.WithStack(err) + } + + pubKey := string(bytes.TrimSpace(publicKey)) + pubKey = strings.ReplaceAll(pubKey, "/", "\\/") + pubKeysFile := executor.FindSSHAuthorizedKeysFile(ctx, e) + + // delete the public key with Linux `sed` toolkit + c := module.ShellModuleConfig{ + Command: fmt.Sprintf("sed -i '/%s/d' %s", pubKey, pubKeysFile), + UseShell: false, + } + shell := module.NewShellModule(c) + stdout, stderr, err := shell.Execute(ctx, e) + + if len(stdout) > 0 { + fmt.Println(string(stdout)) + } + if len(stderr) > 0 { + logger.Errorf(string(stderr)) + } + + if err != nil { + return errors.WithMessagef(err, "failed to delete pulblic key on: %s", host) + } + + logger.Infof("Delete public key %s success", host) + return nil +} + 
+// CleanupComponent cleanup the instances +func CleanupComponent(ctx context.Context, delFileMaps map[string]set.StringSet) error { + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) + for host, delFiles := range delFileMaps { + e := ctxt.GetInner(ctx).Get(host) + logger.Infof("Cleanup instance %s", host) + logger.Debugf("Deleting paths on %s: %s", host, strings.Join(delFiles.Slice(), " ")) + c := module.ShellModuleConfig{ + Command: fmt.Sprintf("rm -rf %s;", strings.Join(delFiles.Slice(), " ")), + Sudo: true, // the .service files are in a directory owned by root + Chdir: "", + UseShell: true, + } + shell := module.NewShellModule(c) + stdout, stderr, err := shell.Execute(ctx, e) + + if len(stdout) > 0 { + fmt.Println(string(stdout)) + } + if len(stderr) > 0 { + logger.Errorf(string(stderr)) + } + + if err != nil { + return errors.WithMessagef(err, "failed to cleanup: %s", host) + } + + logger.Infof("Cleanup %s success", host) + } + + return nil +} + +// DestroyComponent destroy the instances. +func DestroyComponent(ctx context.Context, instances []spec.Instance, cls spec.Topology, options Options) error { + if len(instances) == 0 { + return nil + } + + logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) + name := instances[0].ComponentName() + fmt.Printf("Destroying component %s\n", name) + logger.Infof("Destroying component %s\n", name) + + retainDataRoles := set.NewStringSet(options.RetainDataRoles...) + retainDataNodes := set.NewStringSet(options.RetainDataNodes...) 
+ + for _, ins := range instances { + // Some data of instances will be retained + dataRetained := retainDataRoles.Exist(ins.ComponentName()) || + retainDataNodes.Exist(ins.ID()) || retainDataNodes.Exist(ins.GetHost()) || retainDataRoles.Exist(ins.GetManageHost()) + + e := ctxt.GetInner(ctx).Get(ins.GetManageHost()) + logger.Infof("\tDestroying instance %s\n", ins.GetManageHost()) + + var dataDirs []string + if len(ins.DataDir()) > 0 { + dataDirs = strings.Split(ins.DataDir(), ",") + } + + deployDir := ins.DeployDir() + delPaths := set.NewStringSet() + + // Retain the deploy directory if the users want to retain the data directory + // and the data directory is a sub-directory of deploy directory + keepDeployDir := false + + for _, dataDir := range dataDirs { + // Don't delete the parent directory if any sub-directory retained + keepDeployDir = (dataRetained && strings.HasPrefix(dataDir, deployDir)) || keepDeployDir + if !dataRetained && cls.CountDir(ins.GetManageHost(), dataDir) == 1 { + // only delete path if it is not used by any other instance in the cluster + delPaths.Insert(dataDir) + } + } + + logDir := ins.LogDir() + + if keepDeployDir { + delPaths.Insert(filepath.Join(deployDir, "conf")) + delPaths.Insert(filepath.Join(deployDir, "bin")) + delPaths.Insert(filepath.Join(deployDir, "scripts")) + //if cls.BaseTopo().GlobalOptions.TLSEnabled { + // delPaths.Insert(filepath.Join(deployDir, spec.TLSCertKeyDir)) + //} + // only delete path if it is not used by any other instance in the cluster + if strings.HasPrefix(logDir, deployDir) && cls.CountDir(ins.GetManageHost(), logDir) == 1 { + delPaths.Insert(logDir) + } + } else { + // only delete path if it is not used by any other instance in the cluster + if cls.CountDir(ins.GetManageHost(), logDir) == 1 { + delPaths.Insert(logDir) + } + if cls.CountDir(ins.GetManageHost(), ins.DeployDir()) == 1 { + delPaths.Insert(ins.DeployDir()) + } + } + + // check for deploy dir again, to avoid unused files being left on disk 
+ dpCnt := 0 + for _, dir := range delPaths.Slice() { + if strings.HasPrefix(dir, deployDir+"/") { // only check subdir of deploy dir + dpCnt++ + } + } + if cls.CountDir(ins.GetManageHost(), deployDir)-dpCnt == 1 { + delPaths.Insert(deployDir) + } + + if svc := ins.ServiceName(); svc != "" { + delPaths.Insert(fmt.Sprintf("/etc/systemd/system/%s", svc)) + } + logger.Debugf("Deleting paths on %s: %s\n", ins.GetManageHost(), strings.Join(delPaths.Slice(), " ")) + for _, delPath := range delPaths.Slice() { + c := module.ShellModuleConfig{ + Command: fmt.Sprintf("rm -rf %s;", delPath), + Sudo: true, // the .service files are in a directory owned by root + Chdir: "", + UseShell: false, + } + shell := module.NewShellModule(c) + _, _, err := shell.Execute(ctx, e) + + if err != nil { + // Ignore error and continue. For example, deleting a mount point will result in a "Device or resource busy" error. + logger.Warnf(color.YellowString("Warn: failed to delete path \"%s\" on %s. Please check this error message and manually delete if necessary.\nerrmsg: %s", delPath, ins.GetManageHost(), err)) + } + } + + logger.Infof("Destroy %s finished\n", ins.GetManageHost()) + logger.Infof("- Destroy %s paths: %v\n", ins.ComponentName(), delPaths.Slice()) + } + + return nil +} diff --git a/pkg/cluster/spec/profile.go b/pkg/cluster/spec/profile.go index faeadd6..a733c13 100644 --- a/pkg/cluster/spec/profile.go +++ b/pkg/cluster/spec/profile.go @@ -75,7 +75,7 @@ func Initialize(base string) error { return os.MkdirAll(profileDir, 0750) } -// ProfileDir returns the full profile directory path of TiUP. +// ProfileDir returns the full profile directory path of gemix. 
func ProfileDir() string { return profileDir } diff --git a/pkg/cluster/spec/spec.go b/pkg/cluster/spec/spec.go index 3c1d394..695db8f 100644 --- a/pkg/cluster/spec/spec.go +++ b/pkg/cluster/spec/spec.go @@ -72,12 +72,12 @@ type ( GlobalOptions GlobalOptions `yaml:"global,omitempty" validate:"global:editable"` //MonitoredOptions MonitoredOptions `yaml:"monitored,omitempty" validate:"monitored:editable"` ServerConfigs ServerConfigs `yaml:"server_configs,omitempty" validate:"server_configs:ignore"` - TSMetaServers []*TSMetaSpec `yaml:"ts-meta-servers"` - TSSqlServers []*TSSqlSpec `yaml:"ts-sql-servers"` - TSStoreServers []*TSStoreSpec `yaml:"ts-store-servers"` + TSMetaServers []*TSMetaSpec `yaml:"ts_meta_servers"` + TSSqlServers []*TSSqlSpec `yaml:"ts_sql_servers"` + TSStoreServers []*TSStoreSpec `yaml:"ts_store_servers"` //DashboardServers []*DashboardSpec `yaml:"opengemini_dashboard_servers,omitempty"` //Monitors []*PrometheusSpec `yaml:"monitoring_servers"` - //Grafanas []*GrafanaSpec `yaml:"grafana-servers,omitempty"` + //Grafanas []*GrafanaSpec `yaml:"grafana_servers,omitempty"` } ) @@ -93,7 +93,7 @@ type Topology interface { ComponentsByStopOrder() []Component //ComponentsByUpdateOrder(curVer string) []Component IterInstance(fn func(instance Instance), concurrency ...int) - //CountDir(host string, dir string) int // count how many time a path is used by instances in cluster + CountDir(host string, dir string) int // count how many time a path is used by instances in cluster //TLSConfig(dir string) (*tls.Config, error) //Merge(that Topology) Topology // TODO: for update FillHostArchOrOS(hostArchmap map[string]string, fullType FullHostType) error diff --git a/pkg/cluster/spec/spec_manager.go b/pkg/cluster/spec/spec_manager.go index 6afeadd..43ec506 100644 --- a/pkg/cluster/spec/spec_manager.go +++ b/pkg/cluster/spec/spec_manager.go @@ -134,6 +134,11 @@ func (s *SpecManager) Exist(clusterName string) (exist bool, err error) { return true, nil } +// Remove removes 
the data with specified cluster name. +func (s *SpecManager) Remove(clusterName string) error { + return os.RemoveAll(s.Path(clusterName)) +} + // ensureDir ensures that the cluster directory exists. func (s *SpecManager) ensureDir(clusterName string) error { if err := os.MkdirAll(s.Path(clusterName), 0750); err != nil { diff --git a/pkg/cluster/spec/validate.go b/pkg/cluster/spec/validate.go index ee5bec1..d896c7b 100644 --- a/pkg/cluster/spec/validate.go +++ b/pkg/cluster/spec/validate.go @@ -14,6 +14,105 @@ package spec +import ( + "path/filepath" + "reflect" + "strings" +) + +func findField(v reflect.Value, fieldName string) (int, bool) { + for i := 0; i < reflect.Indirect(v).NumField(); i++ { + if reflect.Indirect(v).Type().Field(i).Name == fieldName { + return i, true + } + } + return -1, false +} + +// CountDir counts for dir paths used by any instance in the cluster with the same +// prefix, useful to find potential path conflicts +func (s *Specification) CountDir(targetHost, dirPrefix string) int { + dirTypes := []string{ + "DeployDir", + "DataDir", + "LogDir", + } + + // path -> count + dirStats := make(map[string]int) + count := 0 + topoSpec := reflect.ValueOf(s).Elem() + dirPrefix = Abs(s.GlobalOptions.User, dirPrefix) + + addHostDir := func(host, deployDir, dir string) { + if !strings.HasPrefix(dir, "/") { + dir = filepath.Join(deployDir, dir) + } + dir = Abs(s.GlobalOptions.User, dir) + dirStats[host+dir]++ + } + + for i := 0; i < topoSpec.NumField(); i++ { + if isSkipField(topoSpec.Field(i)) { + continue + } + + compSpecs := topoSpec.Field(i) + for index := 0; index < compSpecs.Len(); index++ { + compSpec := reflect.Indirect(compSpecs.Index(index)) + deployDir := compSpec.FieldByName("DeployDir").String() + host := compSpec.FieldByName("Host").String() + if compSpec.FieldByName("ManageHost").String() != "" { + host = compSpec.FieldByName("ManageHost").String() + } + + for _, dirType := range dirTypes { + j, found := findField(compSpec, dirType) + if 
!found { + continue + } + + dir := compSpec.Field(j).String() + + switch dirType { // the same as in instance.go for (*instance) + case "DeployDir": + addHostDir(host, deployDir, "") + case "DataDir": + // the default data_dir is relative to deploy_dir + if dir == "" { + addHostDir(host, deployDir, dir) + continue + } + for _, dataDir := range strings.Split(dir, ",") { + dataDir = strings.TrimSpace(dataDir) + if dataDir != "" { + addHostDir(host, deployDir, dataDir) + } + } + case "LogDir": + field := compSpec.FieldByName("LogDir") + if field.IsValid() { + dir = field.Interface().(string) + } + + if dir == "" { + dir = "log" + } + addHostDir(host, deployDir, strings.TrimSpace(dir)) + } + } + } + } + + for k, v := range dirStats { + if k == targetHost+dirPrefix || strings.HasPrefix(k, targetHost+dirPrefix+"/") { + count += v + } + } + + return count +} + // Validate validates the topology specification and produce error if the // specification invalid (e.g: port conflicts or directory conflicts) func (s *Specification) Validate() error { diff --git a/pkg/cluster/task/builder.go b/pkg/cluster/task/builder.go index 119b59d..b7eecec 100644 --- a/pkg/cluster/task/builder.go +++ b/pkg/cluster/task/builder.go @@ -17,18 +17,18 @@ import ( "context" "github.com/openGemini/gemix/pkg/cluster/spec" + logprinter "github.com/openGemini/gemix/pkg/logger/printer" "github.com/openGemini/gemix/pkg/meta" - "go.uber.org/zap" ) -// Builder is used to build TiUP task +// Builder is used to build task type Builder struct { tasks []Task - Logger *zap.Logger + Logger *logprinter.Logger } // NewBuilder returns a *Builder instance -func NewBuilder(logger *zap.Logger) *Builder { +func NewBuilder(logger *logprinter.Logger) *Builder { return &Builder{Logger: logger} } @@ -49,7 +49,7 @@ func (b *Builder) RootSSH( } // NewSimpleUerSSH append a UserSSH task to the current task collection with operator.Options and SSHConnectionProps -func NewSimpleUerSSH(logger *zap.Logger, host string, port int, 
user string, sshTimeout, exeTimeout uint64) *Builder { +func NewSimpleUerSSH(logger *logprinter.Logger, host string, port int, user string, sshTimeout, exeTimeout uint64) *Builder { return NewBuilder(logger). UserSSH( host, @@ -226,7 +226,7 @@ func (b *Builder) Build() Task { } // Step appends a new StepDisplay task, which will print single line progress for inner tasks. -func (b *Builder) Step(prefix string, inner Task, logger *zap.Logger) *Builder { +func (b *Builder) Step(prefix string, inner Task, logger *logprinter.Logger) *Builder { b.Serial(newStepDisplay(prefix, inner, logger)) return b } diff --git a/pkg/cluster/task/step.go b/pkg/cluster/task/step.go index 2ac2725..fcd645b 100644 --- a/pkg/cluster/task/step.go +++ b/pkg/cluster/task/step.go @@ -20,7 +20,7 @@ import ( tea "github.com/charmbracelet/bubbletea" "github.com/openGemini/gemix/pkg/cluster/ctxt" "github.com/openGemini/gemix/pkg/gui/progress" - "go.uber.org/zap" + logprinter "github.com/openGemini/gemix/pkg/logger/printer" ) // StepDisplay is a task that will display a progress bar for inner task. 
@@ -29,7 +29,7 @@ type StepDisplay struct { inner Task prefix string children map[Task]struct{} - Logger *zap.Logger + Logger *logprinter.Logger teaProgram *tea.Program } @@ -56,7 +56,7 @@ func addChildren(m map[Task]struct{}, task Task) { } } -func newStepDisplay(prefix string, inner Task, logger *zap.Logger) *StepDisplay { +func newStepDisplay(prefix string, inner Task, logger *logprinter.Logger) *StepDisplay { children := make(map[Task]struct{}) addChildren(children, inner) return &StepDisplay{ @@ -70,7 +70,7 @@ func newStepDisplay(prefix string, inner Task, logger *zap.Logger) *StepDisplay } // SetLogger set the logger of step -func (s *StepDisplay) SetLogger(logger *zap.Logger) *StepDisplay { +func (s *StepDisplay) SetLogger(logger *logprinter.Logger) *StepDisplay { s.Logger = logger return s } @@ -121,7 +121,7 @@ func (s *StepDisplay) handleTaskProgress(task Task, p string) { type ParallelStepDisplay struct { inner *Parallel prefix string - Logger *zap.Logger + Logger *logprinter.Logger } func newParallelStepDisplay(prefix string, ignoreError bool, sdTasks ...*StepDisplay) *ParallelStepDisplay { @@ -136,7 +136,7 @@ func newParallelStepDisplay(prefix string, ignoreError bool, sdTasks ...*StepDis } // SetLogger set the logger of step -func (ps *ParallelStepDisplay) SetLogger(logger *zap.Logger) *ParallelStepDisplay { +func (ps *ParallelStepDisplay) SetLogger(logger *logprinter.Logger) *ParallelStepDisplay { ps.Logger = logger return ps } diff --git a/pkg/crypto/ca.go b/pkg/crypto/ca.go new file mode 100644 index 0000000..f76eaa3 --- /dev/null +++ b/pkg/crypto/ca.go @@ -0,0 +1,182 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package crypto + +import ( + cr "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "os" + "time" + + "github.com/openGemini/gemix/pkg/crypto/rand" + "github.com/pkg/errors" +) + +var serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128) + +// CertificateAuthority holds the CA of a cluster +type CertificateAuthority struct { + ClusterName string + Cert *x509.Certificate + Key PrivKey +} + +// NewCA generates a new CertificateAuthority object +func NewCA(clsName string) (*CertificateAuthority, error) { + currTime := time.Now().UTC() + + // generate a random serial number for the new ca + serialNumber, err := cr.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, err + } + + caTemplate := &x509.Certificate{ + SerialNumber: serialNumber, + // NOTE: not adding cluster name to the cert subject to avoid potential issues + // when we implement cluster renaming feature. We may consider add this back + // if we find proper way renaming a TLS enabled cluster. + // Adding the cluster name in cert subject may be helpful to diagnose problem + // when a process is trying to connecting a component from another cluster. 
+ Subject: pkix.Name{ + Organization: []string{pkixOrganization}, + OrganizationalUnit: []string{pkixOrganizationalUnit /*, clsName */}, + }, + NotBefore: currTime, + NotAfter: currTime.Add(time.Hour * 24 * 365 * 50), // TODO: support ca cert rotate + IsCA: true, // must be true + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + BasicConstraintsValid: true, + } + + priv, err := NewKeyPair(KeyTypeRSA, KeySchemeRSASSAPSSSHA256) + if err != nil { + return nil, err + } + caBytes, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, priv.Public().Key(), priv.Signer()) + if err != nil { + return nil, err + } + caCert, err := x509.ParseCertificate(caBytes) + if err != nil { + return nil, err + } + return &CertificateAuthority{ + ClusterName: clsName, + Cert: caCert, + Key: priv, + }, nil +} + +// Sign signs a CSR with the CA +func (ca *CertificateAuthority) Sign(csrBytes []byte) ([]byte, error) { + csr, err := x509.ParseCertificateRequest(csrBytes) + if err != nil { + return nil, err + } + if err := csr.CheckSignature(); err != nil { + return nil, err + } + + currTime := time.Now().UTC() + if !currTime.Before(ca.Cert.NotAfter) { + return nil, errors.Errorf("the signer has expired: NotAfter=%v", ca.Cert.NotAfter) + } + + // generate a random serial number for the new cert + serialNumber, err := cr.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, err + } + + template := &x509.Certificate{ + Signature: csr.Signature, + SignatureAlgorithm: csr.SignatureAlgorithm, + PublicKey: csr.PublicKey, + PublicKeyAlgorithm: csr.PublicKeyAlgorithm, + + SerialNumber: serialNumber, + Issuer: ca.Cert.Issuer, + Subject: csr.Subject, + DNSNames: csr.DNSNames, + IPAddresses: csr.IPAddresses, + EmailAddresses: csr.EmailAddresses, + URIs: csr.URIs, + NotBefore: currTime, + NotAfter: currTime.Add(time.Hour * 24 * 365 * 10), + KeyUsage: 
x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + Extensions: csr.Extensions, + ExtraExtensions: csr.ExtraExtensions, + } + + return x509.CreateCertificate(rand.Reader, template, ca.Cert, csr.PublicKey, ca.Key.Signer()) +} + +// ReadCA reads an existing CA certificate from disk +func ReadCA(clsName, certPath, keyPath string) (*CertificateAuthority, error) { + // read private key + rawKey, err := os.ReadFile(keyPath) + if err != nil { + return nil, errors.WithMessagef(err, "error reading CA private key for %s", clsName) + } + keyPem, _ := pem.Decode(rawKey) + if keyPem == nil { + return nil, errors.Errorf("error decoding CA private key for %s", clsName) + } + var privKey PrivKey + switch keyPem.Type { + case "RSA PRIVATE KEY": + pk, err := x509.ParsePKCS1PrivateKey(keyPem.Bytes) + if err != nil { + return nil, errors.WithMessagef(err, "error decoding CA private key for %s", clsName) + } + privKey = &RSAPrivKey{key: pk} + default: + return nil, errors.Errorf("the CA private key type \"%s\" is not supported", keyPem.Type) + } + + // read certificate + rawCert, err := os.ReadFile(certPath) + if err != nil { + return nil, errors.WithMessagef(err, "error reading CA certificate for %s", clsName) + } + certPem, _ := pem.Decode(rawCert) + if certPem == nil { + return nil, errors.Errorf("error decoding CA certificate for %s", clsName) + } + if certPem.Type != "CERTIFICATE" { + return nil, errors.Errorf("the CA certificate type \"%s\" is not valid", certPem.Type) + } + cert, err := x509.ParseCertificate(certPem.Bytes) + if err != nil { + return nil, errors.WithMessagef(err, "error decoding CA certificate for %s", clsName) + } + + return &CertificateAuthority{ + ClusterName: clsName, + Cert: cert, + Key: privKey, + }, nil +} diff --git a/pkg/crypto/ca_test.go b/pkg/crypto/ca_test.go new file mode 100644 index 0000000..783c8a2 --- /dev/null +++ 
b/pkg/crypto/ca_test.go @@ -0,0 +1,128 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package crypto + +import ( + "crypto/x509" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewCA(t *testing.T) { + clsName := "testing-ca" + ca, err := NewCA(clsName) + assert.Nil(t, err) + assert.NotEmpty(t, ca.Cert) + assert.NotEmpty(t, ca.Key) + + // check if it's a CA cert + assert.True(t, ca.Cert.IsCA) + + // check for key subject + assert.NotEmpty(t, ca.Cert.Subject.Organization) + assert.Equal(t, pkixOrganization, ca.Cert.Subject.Organization[0]) + assert.NotEmpty(t, ca.Cert.Subject.OrganizationalUnit) + assert.Equal(t, pkixOrganizationalUnit, ca.Cert.Subject.OrganizationalUnit[0]) + // assert.Equal(t, clsName, ca.Cert.Subject.OrganizationalUnit[1]) + + // check for key usage + assert.Equal(t, x509.KeyUsage(33), ca.Cert.KeyUsage) // x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature + + // check for extended usage + err = func(cert *x509.Certificate) error { + for _, usage := range []x509.ExtKeyUsage{ // expected extended key usage list + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + } { + if func(a x509.ExtKeyUsage, s []x509.ExtKeyUsage) bool { + for _, u := range s { + if u == a { + return true + } + } + return false + }(usage, cert.ExtKeyUsage) { + continue + } + return fmt.Errorf("extended key usage %v not found in generated CA cert", usage) + } + return nil + }(ca.Cert) + assert.Nil(t, err) +} + +func TestCASign(t *testing.T) { + // generate ca + ca, 
err := NewCA("testing-ca") + assert.Nil(t, err) + + // generate cert + privKey, err := NewKeyPair(KeyTypeRSA, KeySchemeRSASSAPSSSHA256) + assert.Nil(t, err) + + csr, err := privKey.CSR("gemix", "testing-cn", + []string{ + "ts-sql", + "ts-sql.server.local", + }, []string{ + "10.0.0.1", + "1.2.3.4", + "fe80:2333::dead:beef", + "2403:5180:5:c37d::", + }) + assert.Nil(t, err) + + certBytes, err := ca.Sign(csr) + assert.Nil(t, err) + cert, err := x509.ParseCertificate(certBytes) + assert.Nil(t, err) + + assert.False(t, cert.IsCA) + assert.Equal(t, ca.Cert.Issuer, cert.Issuer) + assert.Equal(t, []string{pkixOrganization}, cert.Subject.Organization) + assert.Equal(t, []string{pkixOrganizationalUnit, "gemix"}, cert.Subject.OrganizationalUnit) + assert.Equal(t, "testing-cn", cert.Subject.CommonName) + assert.Equal(t, []string{"ts-sql", "ts-sql.server.local"}, cert.DNSNames) + assert.Equal(t, "10.0.0.1", cert.IPAddresses[0].String()) + assert.Equal(t, "1.2.3.4", cert.IPAddresses[1].String()) + assert.Equal(t, "fe80:2333::dead:beef", cert.IPAddresses[2].String()) + assert.Equal(t, "2403:5180:5:c37d::", cert.IPAddresses[3].String()) + + // check for key usage + assert.Equal(t, x509.KeyUsage(5), cert.KeyUsage) // x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment + + // check for extended usage + err = func(cert *x509.Certificate) error { + for _, usage := range []x509.ExtKeyUsage{ // expected extended key usage list + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + } { + if func(a x509.ExtKeyUsage, s []x509.ExtKeyUsage) bool { + for _, u := range s { + if u == a { + return true + } + } + return false + }(usage, cert.ExtKeyUsage) { + continue + } + return fmt.Errorf("extended key usage %v not found in signed cert", usage) + } + return nil + }(cert) + assert.Nil(t, err) +} diff --git a/pkg/crypto/keys.go b/pkg/crypto/keys.go new file mode 100644 index 0000000..28f1091 --- /dev/null +++ b/pkg/crypto/keys.go @@ -0,0 +1,139 @@ +// Copyright 2020 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package crypto + +import ( + "crypto" + "crypto/x509" + "errors" +) + +var ( + // ErrorKeyUninitialized will be present when key is used before Deserialize called + ErrorKeyUninitialized = errors.New("key not initialized, call Deserialize first") + // ErrorDeserializeKey means the key format is not valid + ErrorDeserializeKey = errors.New("error on deserialize key, check if the key is valid") + // ErrorUnsupportedKeyType means we don't supported this type of key + ErrorUnsupportedKeyType = errors.New("provided key type not supported") + // ErrorUnsupportedKeySchema means we don't support this schema + ErrorUnsupportedKeySchema = errors.New("provided schema not supported") +) + +const ( + // KeyTypeRSA represents the RSA type of keys + KeyTypeRSA = "rsa" + + // KeySchemeRSASSAPSSSHA256 represents rsassa-pss-sha256 scheme + KeySchemeRSASSAPSSSHA256 = "rsassa-pss-sha256" + + // strings used for cert subject + pkixOrganization = "openGemini" + pkixOrganizationalUnit = "gemix" + + // PKCS12Password is a hard-coded password for PKCS#12 file, it is by + // intend to use pre-defined string instead of generated every time, + // as the encryption of PKCS#12 it self is weak. The key should be + // protected by other means. 
+ PKCS12Password = "gemix" +) + +// Serializable represents object that can be serialized and deserialized +type Serializable interface { + // Translate the key to the format that can be stored + Serialize() ([]byte, error) + + // Deserialize a key from data + Deserialize([]byte) error +} + +// PubKey is a public key available to gemix +type PubKey interface { + Serializable + // Type returns the type of the key, e.g. RSA + Type() string + // Scheme returns the scheme of signature algorithm, e.g. rsassa-pss-sha256 + Scheme() string + // Key returns the raw public key + Key() crypto.PublicKey + // VerifySignature check the signature is right + VerifySignature(payload []byte, sig string) error +} + +// PrivKey is the private key that provide signature method +type PrivKey interface { + Serializable + // Type returns the type of the key, e.g. RSA + Type() string + // Scheme returns the scheme of signature algorithm, e.g. rsassa-pss-sha256 + Scheme() string + // Signature sign a signature with the key for payload + Signature(payload []byte) (string, error) + // Signer returns the signer of the private key + Signer() crypto.Signer + // Public returns public key of the PrivKey + Public() PubKey + // Pem returns the raw private key in PEM format + Pem() []byte + // CSR creates a new CSR from the private key + CSR(role, commonName string, hostList []string, IPList []string) ([]byte, error) + // PKCS12 encodes the certificate to a pfxData + PKCS12(cert *x509.Certificate, ca *CertificateAuthority) ([]byte, error) +} + +// NewKeyPair return a pair of key +func NewKeyPair(keyType, keyScheme string) (PrivKey, error) { + // We only support RSA now + if keyType != KeyTypeRSA { + return nil, ErrorUnsupportedKeyType + } + + // We only support rsassa-pss-sha256 now + if keyScheme != KeySchemeRSASSAPSSSHA256 { + return nil, ErrorUnsupportedKeySchema + } + + return RSAPair() +} + +// NewPrivKey return PrivKey +func NewPrivKey(keyType, keyScheme string, key []byte) (PrivKey, error) { + 
// We only support RSA now + if keyType != KeyTypeRSA { + return nil, ErrorUnsupportedKeyType + } + + // We only support rsassa-pss-sha256 now + if keyScheme != KeySchemeRSASSAPSSSHA256 { + return nil, ErrorUnsupportedKeySchema + } + + priv := &RSAPrivKey{} + return priv, priv.Deserialize(key) +} + +// NewPubKey returns PubKey +func NewPubKey(keyType, keyScheme string, key []byte) (PubKey, error) { + // We only support RSA now + if keyType != KeyTypeRSA { + return nil, ErrorUnsupportedKeyType + } + + // We only support rsassa-pss-sha256 now + if keyScheme != KeySchemeRSASSAPSSSHA256 { + return nil, ErrorUnsupportedKeySchema + } + + pub := &RSAPubKey{} + return pub, pub.Deserialize(key) +} diff --git a/pkg/crypto/rand/passwd.go b/pkg/crypto/rand/passwd.go new file mode 100644 index 0000000..924c683 --- /dev/null +++ b/pkg/crypto/rand/passwd.go @@ -0,0 +1,53 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package rand + +import ( + "github.com/sethvargo/go-password/password" +) + +// charsets with some in similar shapes removed (e.g., O, o, I, l, etc.) 
+const ( + lowerLetters = "abcdefghijkmnpqrstuvwxyz" + upperLetters = "ABCDEFGHJKLMNPQRSTUVWXYZ" + digits = "0123456789" + symbols = "@^*+-_" +) + +// Password generates a random password +func Password(length int) (string, error) { + if length < 8 { + panic("password length must be at least 8.") + } + + gi := &password.GeneratorInput{ + LowerLetters: lowerLetters, + UpperLetters: upperLetters, + Digits: digits, + Symbols: symbols, + Reader: Reader, + } + g, err := password.NewGenerator(gi) + if err != nil { + return "", err + } + + // 1/3 of the password are digits and 1/4 of it are symbols + numDigits := length / 3 + numSymbols := length / 4 + // allow repeat if the length is longer than the shortest charset + allowRepeat := (numDigits > len(digits) || numSymbols > len(symbols)) + + return g.Generate(length, numDigits, numSymbols, false, allowRepeat) +} diff --git a/pkg/crypto/rand/passwd_test.go b/pkg/crypto/rand/passwd_test.go new file mode 100644 index 0000000..20ff21c --- /dev/null +++ b/pkg/crypto/rand/passwd_test.go @@ -0,0 +1,36 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rand + +import ( + "testing" +) + +func TestPasswd(t *testing.T) { + for i := 0; i < 100; i++ { + l := Intn(64) + if l < 8 { // make sure it's greater than 8 + l += 8 + } + t.Logf("generating random password of length %d", l) + p, e := Password(l) + if e != nil { + t.Error(e) + } + t.Log(p) + if len(p) != l { + t.Fail() + } + } +} diff --git a/pkg/crypto/rand/rand.go b/pkg/crypto/rand/rand.go new file mode 100644 index 0000000..ebf77ed --- /dev/null +++ b/pkg/crypto/rand/rand.go @@ -0,0 +1,55 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package rand + +import ( + cr "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" +) + +var ( + // Reader is a global random number source + Reader *rand.Rand +) + +func init() { + src := make([]byte, 8) + if _, err := cr.Read(src); err != nil { + panic(fmt.Sprintf("initial random: %s", err.Error())) + } + seed := binary.BigEndian.Uint64(src) + Reader = rand.New(rand.NewSource(int64(seed))) +} + +// Int wraps Rand.Int +func Int() int { + return Reader.Int() +} + +// Intn wraps Rand.Intn +func Intn(n int) int { + return Reader.Intn(n) +} + +// Int63n wraps Rand.Int63n +func Int63n(n int64) int64 { + return Reader.Int63n(n) +} + +// Read wraps Rand.Read +func Read(b []byte) (int, error) { + return Reader.Read(b) +} diff --git a/pkg/crypto/rsa.go b/pkg/crypto/rsa.go new file mode 100644 index 0000000..0311c4a --- /dev/null +++ b/pkg/crypto/rsa.go @@ -0,0 +1,225 @@ +// Copyright 2020 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package crypto + +import ( + "crypto" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/pem" + "net" + + "github.com/openGemini/gemix/pkg/crypto/rand" + "github.com/pkg/errors" + "software.sslmate.com/src/go-pkcs12" +) + +// RSAKeyLength define the length of RSA keys +const RSAKeyLength = 2048 + +// RSAPair generate a pair of rsa keys +func RSAPair() (*RSAPrivKey, error) { + key, err := rsa.GenerateKey(rand.Reader, RSAKeyLength) + if err != nil { + return nil, err + } + return &RSAPrivKey{key}, nil +} + +// RSAPubKey represents the public key of RSA +type RSAPubKey struct { + key *rsa.PublicKey +} + +// Type returns the type of the key, e.g. RSA +func (k *RSAPubKey) Type() string { + return KeyTypeRSA +} + +// Scheme returns the scheme of signature algorithm, e.g. 
rsassa-pss-sha256 +func (k *RSAPubKey) Scheme() string { + return KeySchemeRSASSAPSSSHA256 +} + +// Key returns the raw public key +func (k *RSAPubKey) Key() crypto.PublicKey { + return k.key +} + +// Serialize generate the pem format for a key +func (k *RSAPubKey) Serialize() ([]byte, error) { + asn1Bytes, err := x509.MarshalPKIXPublicKey(k.key) + if err != nil { + return nil, err + } + pemKey := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: asn1Bytes, + } + return pem.EncodeToMemory(pemKey), nil +} + +// Deserialize generate a public key from pem format +func (k *RSAPubKey) Deserialize(key []byte) error { + block, _ := pem.Decode(key) + if block == nil { + return ErrorDeserializeKey + } + pubInterface, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return err + } + k.key = pubInterface.(*rsa.PublicKey) + return nil +} + +// VerifySignature check the signature is right +func (k *RSAPubKey) VerifySignature(payload []byte, sig string) error { + if k.key == nil { + return ErrorKeyUninitialized + } + + sha256 := crypto.SHA256.New() + _, err := sha256.Write(payload) + if err != nil { + return errors.WithStack(err) + } + + hashed := sha256.Sum(nil) + + b64decSig, err := base64.StdEncoding.DecodeString(sig) + if err != nil { + return err + } + + return rsa.VerifyPSS(k.key, crypto.SHA256, hashed, b64decSig, nil) +} + +// RSAPrivKey represents the private key of RSA +type RSAPrivKey struct { + key *rsa.PrivateKey +} + +// Type returns the type of the key, e.g. RSA +func (k *RSAPrivKey) Type() string { + return KeyTypeRSA +} + +// Scheme returns the scheme of signature algorithm, e.g. 
rsassa-pss-sha256 +func (k *RSAPrivKey) Scheme() string { + return KeySchemeRSASSAPSSSHA256 +} + +// Serialize generate the pem format for a key +func (k *RSAPrivKey) Serialize() ([]byte, error) { + pemKey := &pem.Block{ + Type: "PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(k.key), + } + + return pem.EncodeToMemory(pemKey), nil +} + +// Deserialize generate a private key from pem format +func (k *RSAPrivKey) Deserialize(key []byte) error { + block, _ := pem.Decode(key) + if block == nil { + return ErrorDeserializeKey + } + privKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return err + } + k.key = privKey + return nil +} + +// Signature sign a signature with the key for payload +func (k *RSAPrivKey) Signature(payload []byte) (string, error) { + if k.key == nil { + return "", ErrorKeyUninitialized + } + + sha256 := crypto.SHA256.New() + _, err := sha256.Write(payload) + if err != nil { + return "", errors.WithStack(err) + } + + hashed := sha256.Sum(nil) + + sig, err := rsa.SignPSS(rand.Reader, k.key, crypto.SHA256, hashed, nil) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(sig), nil +} + +// Public returns public key of the PrivKey +func (k *RSAPrivKey) Public() PubKey { + return &RSAPubKey{ + key: &k.key.PublicKey, + } +} + +// Signer returns the signer of the private key +func (k *RSAPrivKey) Signer() crypto.Signer { + return k.key +} + +// Pem returns the raw private key in PEM format +func (k *RSAPrivKey) Pem() []byte { + return pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(k.key), + }) +} + +// CSR generates a new CSR from given private key +func (k *RSAPrivKey) CSR(role, commonName string, hostList, ipList []string) ([]byte, error) { + var ipAddrList []net.IP + for _, ip := range ipList { + ipAddr := net.ParseIP(ip) + ipAddrList = append(ipAddrList, ipAddr) + } + + // set CSR attributes + csrTemplate := &x509.CertificateRequest{ + Subject: 
pkix.Name{ + Organization: []string{pkixOrganization}, + OrganizationalUnit: []string{pkixOrganizationalUnit, role}, + CommonName: commonName, + }, + DNSNames: hostList, + IPAddresses: ipAddrList, + } + csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, k.key) + if err != nil { + return nil, err + } + + return csr, nil +} + +// PKCS12 encodes the private and certificate to a PKCS#12 pfxData +func (k *RSAPrivKey) PKCS12(cert *x509.Certificate, ca *CertificateAuthority) ([]byte, error) { + return pkcs12.LegacyRC2.Encode( + k.key, + cert, + []*x509.Certificate{ca.Cert}, + PKCS12Password, + ) +} diff --git a/pkg/crypto/rsa_test.go b/pkg/crypto/rsa_test.go new file mode 100644 index 0000000..881d874 --- /dev/null +++ b/pkg/crypto/rsa_test.go @@ -0,0 +1,65 @@ +package crypto + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var cases = [][]byte{ + []byte(`openGemini is an awesome database`), + []byte(`I like coding...`), + []byte(`I hate talking...`), + []byte(`Junk food is good`), +} + +var ( + publicTestKey = []byte(` +-----BEGIN PUBLIC KEY----- +MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALqbHeRLCyOdykC5SDLqI49ArYGYG1mq +aH9/GnWjGavZM02fos4lc2w6tCchcUBNtJvGqKwhC5JEnx3RYoSX2ucCAwEAAQ== +-----END PUBLIC KEY----- +`) + + privateTestKey = []byte(` +-----BEGIN RSA PRIVATE KEY----- +MIIBPQIBAAJBALqbHeRLCyOdykC5SDLqI49ArYGYG1mqaH9/GnWjGavZM02fos4l +c2w6tCchcUBNtJvGqKwhC5JEnx3RYoSX2ucCAwEAAQJBAKn6O+tFFDt4MtBsNcDz +GDsYDjQbCubNW+yvKbn4PJ0UZoEebwmvH1ouKaUuacJcsiQkKzTHleu4krYGUGO1 +mEECIQD0dUhj71vb1rN1pmTOhQOGB9GN1mygcxaIFOWW8znLRwIhAMNqlfLijUs6 +rY+h1pJa/3Fh1HTSOCCCCWA0NRFnMANhAiEAwddKGqxPO6goz26s2rHQlHQYr47K +vgPkZu2jDCo7trsCIQC/PSfRsnSkEqCX18GtKPCjfSH10WSsK5YRWAY3KcyLAQIh +AL70wdUu5jMm2ex5cZGkZLRB50yE6rBiHCd5W1WdTFoe +-----END RSA PRIVATE KEY----- +`) +) + +func TestSignAndVerify(t *testing.T) { + priv, err := RSAPair() + assert.Nil(t, err) + + for _, cas := range cases { + sig, err := priv.Signature(cas) + assert.Nil(t, err) + assert.Nil(t, 
priv.Public().VerifySignature(cas, sig)) + } +} + +func TestSeriAndDeseri(t *testing.T) { + pub := RSAPubKey{} + pri := RSAPrivKey{} + + _, err := pri.Signature([]byte("foo")) + assert.EqualError(t, err, ErrorKeyUninitialized.Error()) + + assert.EqualError(t, pub.VerifySignature([]byte(`foo`), "sig"), ErrorKeyUninitialized.Error()) + + assert.Nil(t, pub.Deserialize(publicTestKey)) + assert.Nil(t, pri.Deserialize(privateTestKey)) + + for _, cas := range cases { + sig, err := pri.Signature(cas) + assert.Nil(t, err) + assert.Nil(t, pub.VerifySignature(cas, sig)) + } +} diff --git a/pkg/gui/cliutil.go b/pkg/gui/cliutil.go index d773847..22df552 100644 --- a/pkg/gui/cliutil.go +++ b/pkg/gui/cliutil.go @@ -22,6 +22,12 @@ import ( "github.com/spf13/cobra" ) +var ( + errNS = errorx.NewNamespace("gui") + errMismatchArgs = errNS.NewType("mismatch_args", utils.ErrTraitPreCheck) + errOperationAbort = errNS.NewType("operation_aborted", utils.ErrTraitPreCheck) +) + // CheckCommandArgsAndMayPrintHelp checks whether user passes enough number of arguments. // If insufficient number of arguments are passed, an error with proper suggestion will be raised. // When no argument is passed, command help will be printed and no error will be raised. @@ -34,7 +40,9 @@ func CheckCommandArgsAndMayPrintHelp(cmd *cobra.Command, args []string, minArgs return false, cmd.Help() } if lenArgs < minArgs { - return false, fmt.Errorf("expect at least %d arguments, but received %d arguments", minArgs, len(args)) + return false, errMismatchArgs. + New("Expect at least %d arguments, but received %d arguments", minArgs, lenArgs). + WithProperty(SuggestionFromString(cmd.UsageString())) } return true, nil } diff --git a/pkg/gui/color.go b/pkg/gui/color.go new file mode 100644 index 0000000..5c9a5e4 --- /dev/null +++ b/pkg/gui/color.go @@ -0,0 +1,82 @@ +// Copyright 2020 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package gui + +// A set of predefined color palettes. You should only reference a color in this palette so that a color +// change can take effect globally. + +import ( + "strings" + + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +var ( + // ColorErrorMsg is the ansi color formatter for error messages + ColorErrorMsg = color.New(color.FgHiRed) + // ColorSuccessMsg is the ansi color formatter for success messages + ColorSuccessMsg = color.New(color.FgHiGreen) + // ColorWarningMsg is the ansi color formatter for warning messages + ColorWarningMsg = color.New(color.FgHiYellow) + // ColorCommand is the ansi color formatter for commands + ColorCommand = color.New(color.FgHiBlue, color.Bold) + // ColorKeyword is the ansi color formatter for cluster name + ColorKeyword = color.New(color.FgHiBlue, color.Bold) +) + +func newColorizeFn(c *color.Color) func() string { + const sep = "----" + seq := c.Sprint(sep) + if len(seq) == len(sep) { + return func() string { + return "" + } + } + colorSeq := strings.Split(seq, sep)[0] + return func() string { + return colorSeq + } +} + +func newColorResetFn() func() string { + const sep = "----" + seq := color.New(color.FgWhite).Sprint(sep) + if len(seq) == len(sep) { + return func() string { + return "" + } + } + colorResetSeq := strings.Split(seq, sep)[1] + return func() string { + return colorResetSeq + } +} + +// AddColorFunctions invokes callback for each colorize functions. 
+func AddColorFunctions(addCallback func(string, any)) { + addCallback("ColorErrorMsg", newColorizeFn(ColorErrorMsg)) + addCallback("ColorSuccessMsg", newColorizeFn(ColorSuccessMsg)) + addCallback("ColorWarningMsg", newColorizeFn(ColorWarningMsg)) + addCallback("ColorCommand", newColorizeFn(ColorCommand)) + addCallback("ColorKeyword", newColorizeFn(ColorKeyword)) + addCallback("ColorReset", newColorResetFn()) +} + +// AddColorFunctionsForCobra adds colorize functions to cobra, so that they can be used in usage or help. +func AddColorFunctionsForCobra() { + AddColorFunctions(func(name string, f any) { + cobra.AddTemplateFunc(name, f) + }) +} diff --git a/pkg/gui/gui.go b/pkg/gui/gui.go index 7c67317..777828b 100644 --- a/pkg/gui/gui.go +++ b/pkg/gui/gui.go @@ -22,6 +22,7 @@ import ( "syscall" "github.com/AstroProfundis/tabby" + "github.com/fatih/color" "golang.org/x/term" ) @@ -54,6 +55,17 @@ func addRow(t *tabby.Tabby, rawLine []string, header bool) { } } +// pre-defined ascii art strings +const ( + ASCIIArtWarning = ` + ██ ██ █████ ██████ ███ ██ ██ ███ ██ ██████ + ██ ██ ██ ██ ██ ██ ████ ██ ██ ████ ██ ██ + ██ █ ██ ███████ ██████ ██ ██ ██ ██ ██ ██ ██ ██ ███ + ██ ███ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ + ███ ███ ██ ██ ██ ██ ██ ████ ██ ██ ████ ██████ +` +) + // Prompt accepts input from console by user func Prompt(prompt string) string { if prompt != "" { @@ -101,6 +113,25 @@ func PromptForConfirmOrAbortError(format string, a ...any) error { return nil } +// PromptForConfirmAnswer accepts string from console by user, default to empty and only return +// true if the user input is exactly the same as pre-defined answer. +func PromptForConfirmAnswer(answer string, format string, a ...any) (bool, string) { + ans := Prompt(fmt.Sprintf(format, a...) 
+ fmt.Sprintf("\n(Type \"%s\" to continue)\n:", color.CyanString(answer))) + if ans == answer { + return true, ans + } + return false, ans +} + +// PromptForAnswerOrAbortError accepts string from console by user, generates AbortError if user does +// not input the pre-defined answer. +func PromptForAnswerOrAbortError(answer string, format string, a ...any) error { + if pass, ans := PromptForConfirmAnswer(answer, format, a...); !pass { + return errOperationAbort.New("Operation aborted by user (with incorrect answer '%s')", ans) + } + return nil +} + // PromptForPassword reads a password input from console func PromptForPassword(format string, a ...any) string { defer fmt.Println("") diff --git a/pkg/localdata/config.go b/pkg/localdata/config.go new file mode 100644 index 0000000..79b73af --- /dev/null +++ b/pkg/localdata/config.go @@ -0,0 +1,56 @@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package localdata + +import ( + "os" + "path" + + "github.com/BurntSushi/toml" + "github.com/openGemini/gemix/pkg/utils" +) + +type configBase struct { + file string +} + +// GemixConfig represent the config file of Gemix +type GemixConfig struct { + configBase + Mirror string `toml:"mirror"` +} + +// InitConfig returns a GemixConfig struct which can flush config back to disk +func InitConfig(root string) (*GemixConfig, error) { + config := GemixConfig{configBase{path.Join(root, "gemix.toml")}, ""} + if utils.IsNotExist(config.file) { + return &config, nil + } + // We can ignore any error at current + // If we have more configs in the future, we should check the error + if _, err := toml.DecodeFile(config.file, &config); err != nil { + return nil, err + } + return &config, nil +} + +// Flush config to disk +func (c *GemixConfig) Flush() error { + f, err := os.OpenFile(c.file, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0664) + if err != nil { + return err + } + defer f.Close() + + return toml.NewEncoder(f).Encode(c) +} diff --git a/pkg/localdata/profile.go b/pkg/localdata/profile.go new file mode 100644 index 0000000..7c8a636 --- /dev/null +++ b/pkg/localdata/profile.go @@ -0,0 +1,303 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package localdata + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "os/user" + "path/filepath" + "sort" + "strings" + + "github.com/openGemini/gemix/pkg/utils" + "github.com/pkg/errors" + "golang.org/x/mod/semver" +) + +// Profile represents the `gemix` profile +type Profile struct { + root string + Config *GemixConfig +} + +// NewProfile returns a new profile instance +func NewProfile(root string, config *GemixConfig) *Profile { + return &Profile{root: root, Config: config} +} + +// InitProfile creates a new profile using environment variables and defaults. +func InitProfile() *Profile { + var profileDir string + switch { + case os.Getenv(EnvNameHome) != "": + profileDir = os.Getenv(EnvNameHome) + case DefaultGemixHome != "": + profileDir = DefaultGemixHome + default: + u, err := user.Current() + if err != nil { + panic("cannot get current user information: " + err.Error()) + } + profileDir = filepath.Join(u.HomeDir, ProfileDirName) + } + + cfg, err := InitConfig(profileDir) + if err != nil { + panic("cannot read config: " + err.Error()) + } + return NewProfile(profileDir, cfg) +} + +// Path returns a full path which is related to profile root directory +func (p *Profile) Path(relpath ...string) string { + return filepath.Join(append([]string{p.root}, relpath...)...) +} + +// Root returns the root path of the `gemix` +func (p *Profile) Root() string { + return p.root +} + +// GetComponentInstalledVersion return the installed version of component. 
+func (p *Profile) GetComponentInstalledVersion(component string, ver utils.Version) (utils.Version, error) { + if !ver.IsEmpty() && ver.String() != utils.NightlyVersionAlias { + return ver, nil + } + versions, err := p.InstalledVersions(component) + if err != nil { + return "", err + } + + // Use the latest version if user doesn't specify a specific version + // report an error if the specific component doesn't be installed + + // Check whether the specific version exist in local + if len(versions) == 0 { + return "", errors.Errorf("component not installed, please try `gemix install %s` to install it", component) + } + sort.Slice(versions, func(i, j int) bool { + return semver.Compare(versions[i], versions[j]) < 0 + }) + if ver.String() == utils.NightlyVersionAlias { + for i := len(versions); i > 0; i-- { + if utils.Version(versions[i-1]).IsNightly() { + return utils.Version(versions[i-1]), nil + } + } + return "", errors.Errorf("component(nightly) not installed, please try `gemix install %s:nightly` to install it", component) + } + return utils.Version(versions[len(versions)-1]), nil +} + +// ComponentInstalledPath returns the path where the component installed +func (p *Profile) ComponentInstalledPath(component string, version utils.Version) (string, error) { + installedVersion, err := p.GetComponentInstalledVersion(component, version) + if err != nil { + return "", err + } + return filepath.Join(p.Path(ComponentParentDir), component, installedVersion.String()), nil +} + +// SaveTo saves file to the profile directory, path is relative to the +// profile directory of current user +func (p *Profile) SaveTo(path string, data []byte, perm os.FileMode) error { + fullPath := filepath.Join(p.root, path) + // create subdirectory if needed + if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { + return errors.WithStack(err) + } + return os.WriteFile(fullPath, data, perm) +} + +// WriteJSON writes struct to a file (in the profile directory) in JSON format 
+func (p *Profile) WriteJSON(path string, data any) error { + jsonData, err := json.MarshalIndent(data, "", " ") + if err != nil { + return errors.WithStack(err) + } + return p.SaveTo(path, jsonData, 0644) +} + +// readJSON read file and unmarshal to target `data` +func (p *Profile) readJSON(path string, data any) error { + fullPath := filepath.Join(p.root, path) + file, err := os.Open(fullPath) + if err != nil { + return errors.WithStack(err) + } + defer file.Close() + + return json.NewDecoder(file).Decode(data) +} + +// ReadMetaFile reads a Process object from dirName/MetaFilename. Returns (nil, nil) if a metafile does not exist. +func (p *Profile) ReadMetaFile(dirName string) (*Process, error) { + metaFile := filepath.Join(DataParentDir, dirName, MetaFilename) + + // If the path doesn't contain the meta file, which means startup interrupted + if utils.IsNotExist(p.Path(metaFile)) { + return nil, nil + } + + var process Process + err := p.readJSON(metaFile, &process) + return &process, err +} + +// InstalledComponents returns the installed components +func (p *Profile) InstalledComponents() ([]string, error) { + compDir := filepath.Join(p.root, ComponentParentDir) + fileInfos, err := os.ReadDir(compDir) + if err != nil && os.IsNotExist(err) { + return nil, nil + } + if err != nil { + return nil, errors.WithStack(err) + } + var components []string + for _, fi := range fileInfos { + if !fi.IsDir() { + continue + } + components = append(components, fi.Name()) + } + sort.Strings(components) + return components, nil +} + +// InstalledVersions returns the installed versions of specific component +func (p *Profile) InstalledVersions(component string) ([]string, error) { + path := filepath.Join(p.root, ComponentParentDir, component) + if utils.IsNotExist(path) { + return nil, nil + } + + fileInfos, err := os.ReadDir(path) + if err != nil { + return nil, errors.WithStack(err) + } + var versions []string + for _, fi := range fileInfos { + if !fi.IsDir() { + continue + } + 
sub, err := os.ReadDir(filepath.Join(path, fi.Name())) + if err != nil || len(sub) < 1 { + continue + } + versions = append(versions, fi.Name()) + } + return versions, nil +} + +// VersionIsInstalled returns true if exactly version of component is installed. +func (p *Profile) VersionIsInstalled(component, version string) (bool, error) { + installed, err := p.InstalledVersions(component) + if err != nil { + return false, err + } + for _, v := range installed { + if v == version { + return true, nil + } + } + return false, nil +} + +// ResetMirror reset root.json and cleanup manifests directory +func (p *Profile) ResetMirror(addr, root string) error { + // Calculating root.json path + shaWriter := sha256.New() + if _, err := io.Copy(shaWriter, strings.NewReader(addr)); err != nil { + return err + } + localRoot := p.Path("bin", fmt.Sprintf("%s.root.json", hex.EncodeToString(shaWriter.Sum(nil))[:16])) + + if root == "" { + switch { + case utils.IsExist(localRoot): + root = localRoot + case strings.HasSuffix(addr, "/"): + root = addr + "root.json" + default: + root = addr + "/root.json" + } + } + + // Fetch root.json + var wc io.ReadCloser + if strings.HasPrefix(root, "http") { + resp, err := http.Get(root) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return errors.Errorf("Fetch remote root.json returns http code %d", resp.StatusCode) + } + wc = resp.Body + } else { + file, err := os.Open(root) + if err != nil { + return err + } + wc = file + } + defer wc.Close() + + f, err := os.OpenFile(p.Path("bin", "root.json"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0664) + if err != nil { + return err + } + if _, err := io.Copy(f, wc); err != nil { + f.Close() + return err + } + f.Close() + + // Only cache remote mirror + if strings.HasPrefix(addr, "http") && root != localRoot { + if strings.HasPrefix(root, "http") { + fmt.Printf("WARN: adding root certificate via internet: %s\n", root) + fmt.Printf("You can revoke this by remove 
%s\n", localRoot) + } + _ = utils.Copy(p.Path("bin", "root.json"), localRoot) + } + + if err := os.RemoveAll(p.Path(ManifestParentDir)); err != nil { + return err + } + + p.Config.Mirror = addr + return p.Config.Flush() +} + +// Process represents a process as written to a meta file. +type Process struct { + Component string `json:"component"` + CreatedTime string `json:"created_time"` + Pid int `json:"pid"` // PID of the process + Exec string `json:"exec"` // Path to the binary + Args []string `json:"args,omitempty"` // Command line arguments + Env []string `json:"env,omitempty"` // Environment variables + Dir string `json:"dir,omitempty"` // Data directory + Cmd *exec.Cmd `json:"-"` +} diff --git a/pkg/logger/audit.go b/pkg/logger/audit.go index 3a687cf..454a12e 100644 --- a/pkg/logger/audit.go +++ b/pkg/logger/audit.go @@ -18,6 +18,7 @@ import ( "bytes" "os" + "github.com/openGemini/gemix/pkg/cluster/audit" "go.uber.org/atomic" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -54,10 +55,10 @@ func OutputAuditLogToFileIfEnabled(dir, fileSuffix string) error { return err } - //err := audit.OutputAuditLog(dir, fileSuffix, auditBuffer.Bytes()) - //if err != nil { - // return err - //} + err := audit.OutputAuditLog(dir, fileSuffix, auditBuffer.Bytes()) + if err != nil { + return err + } if dir == auditDir { auditBuffer.Reset() diff --git a/pkg/logger/debug.go b/pkg/logger/debug.go index 8972f9c..228a523 100644 --- a/pkg/logger/debug.go +++ b/pkg/logger/debug.go @@ -16,7 +16,14 @@ package logger import ( "bytes" + "fmt" + "os" + "path/filepath" + "time" + "github.com/openGemini/gemix/pkg/gui" + "github.com/openGemini/gemix/pkg/localdata" + "github.com/openGemini/gemix/pkg/utils" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) @@ -31,25 +38,25 @@ func newDebugLogCore() zapcore.Core { // OutputDebugLog outputs debug log in the current working directory. 
func OutputDebugLog(prefix string) { - //logDir := os.Getenv(localdata.EnvNameLogPath) - //if logDir == "" { - // profile := localdata.InitProfile() - // logDir = profile.Path("logs") - //} - //if err := os.MkdirAll(logDir, 0750); err != nil { - // _, _ = fmt.Fprintf(os.Stderr, "\nCreate debug logs(%s) directory failed %v.\n", logDir, err) - // return - //} - // - //// FIXME: Stupid go does not allow writing fraction seconds without a leading dot. - //fileName := time.Now().Format(fmt.Sprintf("%s-debug-2006-01-02-15-04-05.log", prefix)) - //filePath := filepath.Join(logDir, fileName) - // - //err := utils.WriteFile(filePath, debugBuffer.Bytes(), 0644) - //if err != nil { - // _, _ = gui.ColorWarningMsg.Fprint(os.Stderr, "\nWarn: Failed to write error debug log.\n") - //} else { - // _, _ = fmt.Fprintf(os.Stderr, "\nVerbose debug logs has been written to %s.\n", tui.ColorKeyword.Sprint(filePath)) - //} + logDir := os.Getenv(localdata.EnvNameLogPath) + if logDir == "" { + profile := localdata.InitProfile() + logDir = profile.Path("logs") + } + if err := os.MkdirAll(logDir, 0750); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "\nCreate debug logs(%s) directory failed %v.\n", logDir, err) + return + } + + // FIXME: Stupid go does not allow writing fraction seconds without a leading dot. + fileName := time.Now().Format(fmt.Sprintf("%s-debug-2006-01-02-15-04-05.log", prefix)) + filePath := filepath.Join(logDir, fileName) + + err := utils.WriteFile(filePath, debugBuffer.Bytes(), 0644) + if err != nil { + _, _ = gui.ColorWarningMsg.Fprint(os.Stderr, "\nWarn: Failed to write error debug log.\n") + } else { + _, _ = fmt.Fprintf(os.Stderr, "\nVerbose debug logs has been written to %s.\n", gui.ColorKeyword.Sprint(filePath)) + } debugBuffer.Reset() } diff --git a/pkg/utils/ioutil.go b/pkg/utils/ioutil.go new file mode 100644 index 0000000..87e3780 --- /dev/null +++ b/pkg/utils/ioutil.go @@ -0,0 +1,433 @@ +// Copyright 2020 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "archive/tar" + "bufio" + "compress/gzip" + "crypto/sha1" + "encoding/hex" + "io" + "io/fs" + "os" + "path" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "github.com/otiai10/copy" + "github.com/pkg/errors" +) + +var ( + fileLocks = make(map[string]*sync.Mutex) + filesLock = sync.Mutex{} +) + +// IsSymExist check whether a symbol link is exist +func IsSymExist(path string) bool { + _, err := os.Lstat(path) + return !os.IsNotExist(err) +} + +// IsExist check whether a path is exist +func IsExist(path string) bool { + _, err := os.Stat(path) + return !os.IsNotExist(err) +} + +// IsNotExist check whether a path is not exist +func IsNotExist(path string) bool { + _, err := os.Stat(path) + return os.IsNotExist(err) +} + +// IsEmptyDir check whether a path is an empty directory +func IsEmptyDir(path string) (bool, error) { + f, err := os.Open(path) + if err != nil { + return false, err + } + defer f.Close() + + _, err = f.Readdirnames(1) + if err == io.EOF { + return true, nil + } + return false, err +} + +// IsExecBinary check whether a path is a valid executable +func IsExecBinary(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + return !info.IsDir() && info.Mode()&0111 == 0111 +} + +// IsSubDir returns if sub is a sub directory of parent +func IsSubDir(parent, sub string) bool { + up := ".." 
+ string(os.PathSeparator) + + rel, err := filepath.Rel(parent, sub) + if err != nil { + return false + } + if !strings.HasPrefix(rel, up) && rel != ".." { + return true + } + return false +} + +// Tar compresses the folder to tarball with gzip +func Tar(writer io.Writer, from string) error { + compressW := gzip.NewWriter(writer) + defer compressW.Close() + tarW := tar.NewWriter(compressW) + defer tarW.Close() + + // NOTE: filepath.Walk does not follow the symbolic link. + return filepath.Walk(from, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + + link := "" + if info.Mode()&fs.ModeSymlink != 0 { + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + header, _ := tar.FileInfoHeader(info, link) + header.Name, _ = filepath.Rel(from, path) + // skip "." + if header.Name == "." { + return nil + } + + err = tarW.WriteHeader(header) + if err != nil { + return err + } + if info.Mode().IsRegular() { + fd, err := os.Open(path) + if err != nil { + return err + } + defer fd.Close() + _, err = io.Copy(tarW, fd) + return err + } + return nil + }) +} + +// Untar decompresses the tarball +func Untar(reader io.Reader, to string) error { + gr, err := gzip.NewReader(reader) + if err != nil { + return errors.WithStack(err) + } + defer gr.Close() + + tr := tar.NewReader(gr) + + decFile := func(hdr *tar.Header) error { + file := path.Join(to, hdr.Name) + err := MkdirAll(filepath.Dir(file), 0755) + if err != nil { + return err + } + fw, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, hdr.FileInfo().Mode()) + if err != nil { + return errors.WithStack(err) + } + defer fw.Close() + + _, err = io.Copy(fw, tr) + return errors.WithStack(err) + } + + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return errors.WithStack(err) + } + switch hdr.Typeflag { + case tar.TypeDir: + if err := MkdirAll(path.Join(to, hdr.Name), hdr.FileInfo().Mode()); err != nil { + return 
errors.WithStack(err) + } + case tar.TypeSymlink: + if err = os.Symlink(hdr.Linkname, filepath.Join(to, hdr.Name)); err != nil { + return errors.WithStack(err) + } + default: + if err := decFile(hdr); err != nil { + return errors.WithStack(err) + } + } + } + return nil +} + +// Copy copies a file or directory from src to dst +func Copy(src, dst string) error { + // check if src is a directory + fi, err := os.Stat(src) + if err != nil { + return err + } + if fi.IsDir() { + // use copy.Copy to copy a directory + return copy.Copy(src, dst) + } + + // for regular files + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, in) + if err != nil { + return err + } + + err = out.Close() + if err != nil { + return err + } + + err = os.Chmod(dst, fi.Mode()) + if err != nil { + return err + } + + // Make sure the created dst's modify time is newer (at least equal) than src + // this is used to workaround github action virtual filesystem + ofi, err := os.Stat(dst) + if err != nil { + return err + } + if fi.ModTime().After(ofi.ModTime()) { + return os.Chtimes(dst, fi.ModTime(), fi.ModTime()) + } + return nil +} + +// Move moves a file from src to dst, this is done by copying the file and then +// delete the old one. Use os.Rename() to rename file within the same filesystem +// instead this, it's more lightweight but can not be used across devices. 
+func Move(src, dst string) error { + if err := Copy(src, dst); err != nil { + return errors.WithStack(err) + } + return errors.WithStack(os.RemoveAll(src)) +} + +// Checksum returns the sha1 sum of target file +func Checksum(file string) (string, error) { + tarball, err := os.OpenFile(file, os.O_RDONLY, 0) + if err != nil { + return "", err + } + defer tarball.Close() + + sha1Writter := sha1.New() + if _, err := io.Copy(sha1Writter, tarball); err != nil { + return "", err + } + + checksum := hex.EncodeToString(sha1Writter.Sum(nil)) + return checksum, nil +} + +// TailN try get the latest n line of the file. +func TailN(fname string, n int) (lines []string, err error) { + file, err := os.Open(fname) + if err != nil { + return nil, errors.WithStack(err) + } + defer file.Close() + + estimateLineSize := 1024 + + stat, err := os.Stat(fname) + if err != nil { + return nil, errors.WithStack(err) + } + + start := int(stat.Size()) - n*estimateLineSize + if start < 0 { + start = 0 + } + + _, err = file.Seek(int64(start), 0 /*means relative to the origin of the file*/) + if err != nil { + return nil, errors.WithStack(err) + } + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + if len(lines) > n { + lines = lines[len(lines)-n:] + } + + return +} + +func fileLock(path string) *sync.Mutex { + filesLock.Lock() + defer filesLock.Unlock() + + if _, ok := fileLocks[path]; !ok { + fileLocks[path] = &sync.Mutex{} + } + + return fileLocks[path] +} + +// SaveFileWithBackup will backup the file before save it. +// e.g., backup meta.yaml as meta-2006-01-02T15:04:05Z07:00.yaml +// backup the files in the same dir of path if backupDir is empty. 
+func SaveFileWithBackup(path string, data []byte, backupDir string) error { + fileLock(path).Lock() + defer fileLock(path).Unlock() + + info, err := os.Stat(path) + if err != nil && !os.IsNotExist(err) { + return errors.WithStack(err) + } + + if info != nil && info.IsDir() { + return errors.Errorf("%s is directory", path) + } + + // backup file + if !os.IsNotExist(err) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + var backupName string + timestr := time.Now().Format(time.RFC3339Nano) + p := strings.Split(base, ".") + if len(p) == 1 { + backupName = base + "-" + timestr + } else { + backupName = strings.Join(p[0:len(p)-1], ".") + "-" + timestr + "." + p[len(p)-1] + } + + backupData, err := os.ReadFile(path) + if err != nil { + return errors.WithStack(err) + } + + var backupPath string + if backupDir != "" { + backupPath = filepath.Join(backupDir, backupName) + } else { + backupPath = filepath.Join(dir, backupName) + } + err = os.WriteFile(backupPath, backupData, 0644) + if err != nil { + return errors.WithStack(err) + } + } + + err = os.WriteFile(path, data, 0644) + if err != nil { + return errors.WithStack(err) + } + + return nil +} + +// MkdirAll basically copied from os.MkdirAll, but use max(parent permission,minPerm) +func MkdirAll(path string, minPerm os.FileMode) error { + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent. 
+ err = MkdirAll(path[:j-1], minPerm) + if err != nil { + return err + } + } + + perm := minPerm + fi, err := os.Stat(filepath.Dir(path)) + if err == nil { + perm |= fi.Mode().Perm() + } + + // Parent now exists; invoke Mkdir and use its result; inheritance parent perm. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// WriteFile call os.WriteFile, but use max(parent permission,minPerm) +func WriteFile(name string, data []byte, perm os.FileMode) error { + fi, err := os.Stat(filepath.Dir(name)) + if err == nil { + perm |= (fi.Mode().Perm() & 0666) + } + return os.WriteFile(name, data, perm) +} diff --git a/pkg/utils/semver.go b/pkg/utils/semver.go index d71c87e..4972c02 100644 --- a/pkg/utils/semver.go +++ b/pkg/utils/semver.go @@ -20,6 +20,9 @@ import ( "golang.org/x/mod/semver" ) +// NightlyVersionAlias represents latest build of master branch. +const NightlyVersionAlias = "nightly" + // LatestVersionAlias represents the latest build (excluding nightly versions). const LatestVersionAlias = "latest" @@ -41,3 +44,28 @@ func FmtVer(ver string) (string, error) { } return v, nil } + +type ( + // Version represents a version string, like: v3.1.2 + Version string +) + +// IsValid checks whether is the version string valid +func (v Version) IsValid() bool { + return v != "" && semver.IsValid(string(v)) +} + +// IsEmpty returns true if the `Version` is a empty string +func (v Version) IsEmpty() bool { + return v == "" +} + +// IsNightly returns true if the version is nightly +func (v Version) IsNightly() bool { + return strings.Contains(string(v), NightlyVersionAlias) +} + +// String implements the fmt.Stringer interface +func (v Version) String() string { + return string(v) +}