diff --git a/pkg/application/service/services.go b/pkg/application/service/services.go index 4d15b6b0..2602dccc 100644 --- a/pkg/application/service/services.go +++ b/pkg/application/service/services.go @@ -38,7 +38,7 @@ type VerificationService interface { } type MemberClusterService interface { - GetClusterAccess(userID, username, workspace, proxyPluginName string) (*access.ClusterAccess, error) + GetClusterAccess(userID, username, workspace, proxyPluginName string, publicViewerEnabled bool) (*access.ClusterAccess, error) } type Services interface { diff --git a/pkg/proxy/proxy.go b/pkg/proxy/proxy.go index d950c1bb..76be362a 100644 --- a/pkg/proxy/proxy.go +++ b/pkg/proxy/proxy.go @@ -272,7 +272,7 @@ func (p *Proxy) processRequest(ctx echo.Context) (string, *access.ClusterAccess, } ctx.Set(context.WorkspaceKey, workspaceName) // set workspace context for logging - cluster, err := p.app.MemberClusterService().GetClusterAccess(userID, username, workspaceName, proxyPluginName) + cluster, err := p.app.MemberClusterService().GetClusterAccess(userID, username, workspaceName, proxyPluginName, false) if err != nil { return "", nil, crterrors.NewInternalError(errs.New("unable to get target cluster"), err.Error()) } diff --git a/pkg/proxy/service/cluster_service.go b/pkg/proxy/service/cluster_service.go index 95551105..29892b63 100644 --- a/pkg/proxy/service/cluster_service.go +++ b/pkg/proxy/service/cluster_service.go @@ -11,6 +11,7 @@ import ( servicecontext "github.com/codeready-toolchain/registration-service/pkg/application/service/context" "github.com/codeready-toolchain/registration-service/pkg/log" "github.com/codeready-toolchain/registration-service/pkg/proxy/access" + "github.com/codeready-toolchain/registration-service/pkg/signup" "github.com/codeready-toolchain/toolchain-common/pkg/cluster" routev1 "github.com/openshift/api/route/v1" @@ -40,21 +41,21 @@ func NewMemberClusterService(context servicecontext.ServiceContext, options ...O return si } -func (s 
*ServiceImpl) GetClusterAccess(userID, username, workspace, proxyPluginName string) (*access.ClusterAccess, error) { - signup, err := s.Services().SignupService().GetSignupFromInformer(nil, userID, username, false) // don't check for usersignup complete status, since it might cause the proxy blocking the request and returning an error when quick transitions from ready to provisioning are happening. - if err != nil { - return nil, err - } - // if signup has the CompliantUsername set it means that MUR was created and useraccount is provisioned - if signup == nil || signup.CompliantUsername == "" { - cause := errs.New("user is not provisioned (yet)") - log.Error(nil, cause, fmt.Sprintf("signup object: %+v", signup)) - return nil, cause - } - +func (s *ServiceImpl) GetClusterAccess(userID, username, workspace, proxyPluginName string, publicViewerEnabled bool) (*access.ClusterAccess, error) { // if workspace is not provided then return the default space access if workspace == "" { - return s.accessForCluster(signup.APIEndpoint, signup.ClusterName, signup.CompliantUsername, proxyPluginName) + return s.getClusterAccessForDefaultWorkspace(userID, username, proxyPluginName) + } + + return s.getSpaceAccess(userID, username, workspace, proxyPluginName, publicViewerEnabled) +} + +// getSpaceAccess retrieves space access for a user +func (s *ServiceImpl) getSpaceAccess(userID, username, workspace, proxyPluginName string, publicViewerEnabled bool) (*access.ClusterAccess, error) { + // retrieve the user's compliant username + complaintUserName, err := s.getUserSignupComplaintName(userID, username, publicViewerEnabled) + if err != nil { + return nil, err } // look up space @@ -65,7 +66,52 @@ func (s *ServiceImpl) GetClusterAccess(userID, username, workspace, proxyPluginN return nil, fmt.Errorf("the requested space is not available") } - return s.accessForSpace(space, signup.CompliantUsername, proxyPluginName) + return s.accessForSpace(space, complaintUserName, proxyPluginName) +} + 
+func (s *ServiceImpl) getUserSignupComplaintName(userID, username string, publicViewerEnabled bool) (string, error) { + // if PublicViewer is enabled and the requested user is the PublicViewer, then no lookup is required + if publicViewerEnabled && username == toolchainv1alpha1.KubesawAuthenticatedUsername { + return username, nil + } + + // retrieve the UserSignup from cache + userSignup, err := s.getSignupFromInformerForProvisionedUser(userID, username) + if err != nil { + return "", err + } + + return userSignup.CompliantUsername, nil +} + +// getClusterAccessForDefaultWorkspace retrieves the cluster for the user's default workspace +func (s *ServiceImpl) getClusterAccessForDefaultWorkspace(userID, username, proxyPluginName string) (*access.ClusterAccess, error) { + // retrieve the UserSignup from cache + userSignup, err := s.getSignupFromInformerForProvisionedUser(userID, username) + if err != nil { + return nil, err + } + + // retrieve user's access for cluster + return s.accessForCluster(userSignup.APIEndpoint, userSignup.ClusterName, userSignup.CompliantUsername, proxyPluginName) +} + +func (s *ServiceImpl) getSignupFromInformerForProvisionedUser(userID, username string) (*signup.Signup, error) { + // don't check for usersignup complete status, since it might cause the proxy blocking the request + // and returning an error when quick transitions from ready to provisioning are happening. 
+ userSignup, err := s.Services().SignupService().GetSignupFromInformer(nil, userID, username, false) + if err != nil { + return nil, err + } + + // if signup has the CompliantUsername set it means that MUR was created and useraccount is provisioned + if userSignup == nil || userSignup.CompliantUsername == "" { + cause := errs.New("user is not provisioned (yet)") + log.Error(nil, cause, fmt.Sprintf("signup object: %+v", userSignup)) + return nil, cause + } + + return userSignup, nil } func (s *ServiceImpl) accessForSpace(space *toolchainv1alpha1.Space, username, proxyPluginName string) (*access.ClusterAccess, error) { diff --git a/pkg/proxy/service/cluster_service_test.go b/pkg/proxy/service/cluster_service_test.go index 6ec55e78..e818da0c 100644 --- a/pkg/proxy/service/cluster_service_test.go +++ b/pkg/proxy/service/cluster_service_test.go @@ -107,318 +107,410 @@ func (s *TestClusterServiceSuite) TestGetClusterAccess() { }, ) - s.Run("unable to get signup", func() { - s.Run("signup service returns error", func() { - sc.MockGetSignup = func(_, _ string) (*signup.Signup, error) { - return nil, errors.New("oopsi woopsi") - } - - // when - _, err := svc.GetClusterAccess("789-ready", "", "", "") - - // then - require.EqualError(s.T(), err, "oopsi woopsi") - }) - - sc.MockGetSignup = sc.DefaultMockGetSignup() // restore the default signup service, so it doesn't return an error anymore + tt := map[string]struct { + publicViewerEnabled bool + }{ + "public-viewer enabled": {publicViewerEnabled: true}, + "public-viewer disabled": {publicViewerEnabled: false}, + } - s.Run("userid is not found", func() { - // when - _, err := svc.GetClusterAccess("unknown_id", "", "", "") + for k, tc := range tt { + publicViewerEnabled := tc.publicViewerEnabled - // then - require.EqualError(s.T(), err, "user is not provisioned (yet)") - }) + s.Run(k, func() { - s.Run("username is not found", func() { - // when - _, err := svc.GetClusterAccess("", "unknown_username", "", "") + s.Run("unable 
to get signup", func() { + tt := map[string]struct { + workspace string + }{ + "default workspace": {workspace: ""}, + "not-existing workspace": {workspace: "not-existing"}, + } + for k, tc := range tt { + s.Run(k, func() { + s.Run("signup service returns error", func() { + sc.MockGetSignup = func(_, _ string) (*signup.Signup, error) { + return nil, errors.New("oopsi woopsi") + } - // then - require.EqualError(s.T(), err, "user is not provisioned (yet)") - }) + // when + _, err := svc.GetClusterAccess("789-ready", "", tc.workspace, "", publicViewerEnabled) - s.Run("user is not provisioned yet", func() { - // when - _, err := svc.GetClusterAccess("456-not-ready", "", "", "") + // then + require.EqualError(s.T(), err, "oopsi woopsi") + }) - // then - require.EqualError(s.T(), err, "user is not provisioned (yet)") - }) - }) + sc.MockGetSignup = sc.DefaultMockGetSignup() // restore the default signup service, so it doesn't return an error anymore - s.Run("unable to get space", func() { - s.Run("informer service returns error", func() { - original := inf.GetSpaceFunc - defer func() { // restore original GetSpaceFunc after test - inf.GetSpaceFunc = original - s.Application.MockInformerService(inf) - }() - inf.GetSpaceFunc = func(_ string) (*toolchainv1alpha1.Space, error) { // informer error - return nil, fmt.Errorf("oopsi woopsi") - } - s.Application.MockInformerService(inf) + s.Run("userid is not found", func() { + // when + _, err := svc.GetClusterAccess("unknown_id", "", tc.workspace, "", publicViewerEnabled) - // when - _, err := svc.GetClusterAccess("789-ready", "", "smith2", "") + // then + require.EqualError(s.T(), err, "user is not provisioned (yet)") + }) - // then - // original error is only logged so that it doesn't reveal information about a space that may not belong to the requestor - require.EqualError(s.T(), err, "the requested space is not available") - }) + s.Run("username is not found", func() { + // when + _, err := svc.GetClusterAccess("", 
"unknown_username", tc.workspace, "", publicViewerEnabled) - s.Run("space not found", func() { - // when - _, err := svc.GetClusterAccess("789-ready", "", "unknown", "") // unknown workspace requested + // then + require.EqualError(s.T(), err, "user is not provisioned (yet)") + }) - // then - require.EqualError(s.T(), err, "the requested space is not available") - }) - }) + s.Run("user is not provisioned yet", func() { + // when + _, err := svc.GetClusterAccess("456-not-ready", "", tc.workspace, "", publicViewerEnabled) - s.Run("no member cluster found", func() { - s.Run("no member clusters", func() { - svc := service.NewMemberClusterService( - fake.MemberClusterServiceContext{ - Client: s, - Svcs: s.Application, - }, - func(si *service.ServiceImpl) { - si.GetMembersFunc = func(_ ...commoncluster.Condition) []*commoncluster.CachedToolchainCluster { - return []*commoncluster.CachedToolchainCluster{} - } - }, - ) - s.Run("default workspace case", func() { - // when - _, err := svc.GetClusterAccess("789-ready", "", "", "") + // then + require.EqualError(s.T(), err, "user is not provisioned (yet)") + }) - // then - require.EqualError(s.T(), err, "no member clusters found") - }) - - s.Run("workspace context case", func() { - // when - _, err := svc.GetClusterAccess("789-ready", "", "smith2", "") - - // then - require.EqualError(s.T(), err, "no member clusters found") + }) + } }) - }) - s.Run("no member cluster with the given URL", func() { - svc := service.NewMemberClusterService( - fake.MemberClusterServiceContext{ - Client: s, - Svcs: s.Application, - }, - func(si *service.ServiceImpl) { - si.GetMembersFunc = func(_ ...commoncluster.Condition) []*commoncluster.CachedToolchainCluster { - return s.memberClusters() + s.Run("unable to get space", func() { + s.Run("informer service returns error", func() { + original := inf.GetSpaceFunc + defer func() { // restore original GetSpaceFunc after test + inf.GetSpaceFunc = original + s.Application.MockInformerService(inf) + }() 
+ inf.GetSpaceFunc = func(_ string) (*toolchainv1alpha1.Space, error) { // informer error + return nil, fmt.Errorf("oopsi woopsi") } - }, - ) + s.Application.MockInformerService(inf) - s.Run("default workspace case", func() { - // when - _, err := svc.GetClusterAccess("012-ready-unknown-cluster", "", "", "") + // when + _, err := svc.GetClusterAccess("789-ready", "", "smith2", "", publicViewerEnabled) - // then - require.EqualError(s.T(), err, "no member cluster found for the user") - }) + // then + // original error is only logged so that it doesn't reveal information about a space that may not belong to the requestor + require.EqualError(s.T(), err, "the requested space is not available") + }) - s.Run("workspace context case", func() { - // when - _, err := svc.GetClusterAccess("012-ready-unknown-cluster", "", "unknown-cluster", "") + s.Run("space not found", func() { + // when + _, err := svc.GetClusterAccess("789-ready", "", "unknown", "", publicViewerEnabled) // unknown workspace requested - // then - require.EqualError(s.T(), err, "no member cluster found for space 'unknown-cluster'") + // then + require.EqualError(s.T(), err, "the requested space is not available") + }) }) - }) - }) - - s.Run("member found", func() { - memberClient := commontest.NewFakeClient(s.T()) - memberArray := []*commoncluster.CachedToolchainCluster{ - { - Config: &commoncluster.Config{ - Name: "member-1", - APIEndpoint: "https://api.endpoint.member-1.com:6443", - RestConfig: &rest.Config{ - BearerToken: "def456", - }, - }, - }, - { - Config: &commoncluster.Config{ - Name: "member-2", - APIEndpoint: "https://api.endpoint.member-2.com:6443", - OperatorNamespace: "member-operator", - RestConfig: &rest.Config{ - BearerToken: "abc123", - }, - }, - Client: memberClient, - }, - { - Config: &commoncluster.Config{ - Name: "member-3", - APIEndpoint: "https://api.endpoint.member-3.com:6443", - RestConfig: &rest.Config{}, - }, - }, - } - svc := service.NewMemberClusterService( - 
fake.MemberClusterServiceContext{ - Client: s, - Svcs: s.Application, - }, - func(si *service.ServiceImpl) { - si.GetMembersFunc = func(_ ...commoncluster.Condition) []*commoncluster.CachedToolchainCluster { - return memberArray - } - }, - ) - - s.Run("verify cluster access with route", func() { - memberClient.MockGet = func(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - route, ok := obj.(*routev1.Route) - if ok && key.Namespace == "tekton-results" && key.Name == "tekton-results" { - route.Namespace = key.Namespace - route.Name = key.Name - route.Status.Ingress = []routev1.RouteIngress{ - { - Host: "myservice.endpoint.member-2.com", + s.Run("no member cluster found", func() { + s.Run("no member clusters", func() { + svc := service.NewMemberClusterService( + fake.MemberClusterServiceContext{ + Client: s, + Svcs: s.Application, }, - } - return nil - } - return memberClient.Client.Get(ctx, key, obj, opts...) - } - expectedToken := "abc123" // should match member 2 bearer token + func(si *service.ServiceImpl) { + si.GetMembersFunc = func(_ ...commoncluster.Condition) []*commoncluster.CachedToolchainCluster { + return []*commoncluster.CachedToolchainCluster{} + } + }, + ) + s.Run("default workspace case", func() { + // when + _, err := svc.GetClusterAccess("789-ready", "", "", "", publicViewerEnabled) + + // then + require.EqualError(s.T(), err, "no member clusters found") + }) + + s.Run("workspace context case", func() { + // when + _, err := svc.GetClusterAccess("789-ready", "", "smith2", "", publicViewerEnabled) + + // then + require.EqualError(s.T(), err, "no member clusters found") + }) + }) - // when - ca, err := svc.GetClusterAccess("789-ready", "", "", "tekton-results") + s.Run("no member cluster with the given URL", func() { + svc := service.NewMemberClusterService( + fake.MemberClusterServiceContext{ + Client: s, + Svcs: s.Application, + }, + func(si *service.ServiceImpl) { + si.GetMembersFunc = func(_ 
...commoncluster.Condition) []*commoncluster.CachedToolchainCluster { + return s.memberClusters() + } + }, + ) - // then - require.NoError(s.T(), err) - require.NotNil(s.T(), ca) - expectedURL, err := url.Parse("https://myservice.endpoint.member-2.com") - require.NoError(s.T(), err) - assert.Equal(s.T(), "smith2", ca.Username()) + s.Run("default workspace case", func() { + // when + _, err := svc.GetClusterAccess("012-ready-unknown-cluster", "", "", "", publicViewerEnabled) - s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, ""), ca) + // then + require.EqualError(s.T(), err, "no member cluster found for the user") + }) - s.Run("cluster access correct when username provided", func() { - // when - ca, err := svc.GetClusterAccess("", "smith@", "", "tekton-results") + s.Run("workspace context case", func() { + // when + _, err := svc.GetClusterAccess("012-ready-unknown-cluster", "", "unknown-cluster", "", publicViewerEnabled) - // then - require.NoError(s.T(), err) - require.NotNil(s.T(), ca) - expectedURL, err := url.Parse("https://myservice.endpoint.member-2.com") - require.NoError(s.T(), err) - s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, "smith"), ca) - assert.Equal(s.T(), "smith2", ca.Username()) + // then + require.EqualError(s.T(), err, "no member cluster found for space 'unknown-cluster'") + }) + }) }) - s.Run("cluster access correct when using workspace context", func() { - // when - ca, err := svc.GetClusterAccess("789-ready", "", "smith2", "tekton-results") // workspace-context specified + s.Run("member found", func() { + memberClient := commontest.NewFakeClient(s.T()) + memberArray := []*commoncluster.CachedToolchainCluster{ + { + Config: &commoncluster.Config{ + Name: "member-1", + APIEndpoint: "https://api.endpoint.member-1.com:6443", + RestConfig: &rest.Config{ + BearerToken: "def456", + }, + }, + }, + { + Config: &commoncluster.Config{ + Name: "member-2", + APIEndpoint: 
"https://api.endpoint.member-2.com:6443", + OperatorNamespace: "member-operator", + RestConfig: &rest.Config{ + BearerToken: "abc123", + }, + }, + Client: memberClient, + }, + { + Config: &commoncluster.Config{ + Name: "member-3", + APIEndpoint: "https://api.endpoint.member-3.com:6443", + RestConfig: &rest.Config{}, + }, + }, + } - // then - require.NoError(s.T(), err) - require.NotNil(s.T(), ca) - expectedURL, err := url.Parse("https://myservice.endpoint.member-2.com") - require.NoError(s.T(), err) - s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, "smith"), ca) - assert.Equal(s.T(), "smith2", ca.Username()) + svc := service.NewMemberClusterService( + fake.MemberClusterServiceContext{ + Client: s, + Svcs: s.Application, + }, + func(si *service.ServiceImpl) { + si.GetMembersFunc = func(_ ...commoncluster.Condition) []*commoncluster.CachedToolchainCluster { + return memberArray + } + }, + ) - s.Run("another workspace on another cluster", func() { - // when - mC := commontest.NewFakeClient(s.T()) - mC.MockGet = func(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + s.Run("verify cluster access with route", func() { + memberClient.MockGet = func(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { route, ok := obj.(*routev1.Route) if ok && key.Namespace == "tekton-results" && key.Name == "tekton-results" { route.Namespace = key.Namespace route.Name = key.Name route.Status.Ingress = []routev1.RouteIngress{ { - Host: "api.endpoint.member-1.com:6443", + Host: "myservice.endpoint.member-2.com", }, } return nil } return memberClient.Client.Get(ctx, key, obj, opts...) 
} - memberArray[0].Client = mC - ca, err := svc.GetClusterAccess("789-ready", "", "teamspace", "tekton-results") // workspace-context specified + expectedToken := "abc123" // should match member 2 bearer token + + // when + ca, err := svc.GetClusterAccess("789-ready", "", "", "tekton-results", publicViewerEnabled) // then require.NoError(s.T(), err) require.NotNil(s.T(), ca) - expectedURL, err := url.Parse("https://api.endpoint.member-1.com:6443") + expectedURL, err := url.Parse("https://myservice.endpoint.member-2.com") require.NoError(s.T(), err) - s.assertClusterAccess(access.NewClusterAccess(*expectedURL, "def456", "smith"), ca) assert.Equal(s.T(), "smith2", ca.Username()) + + s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, ""), ca) + + s.Run("cluster access correct when username provided", func() { + // when + ca, err := svc.GetClusterAccess("", "smith@", "", "tekton-results", publicViewerEnabled) + + // then + require.NoError(s.T(), err) + require.NotNil(s.T(), ca) + expectedURL, err := url.Parse("https://myservice.endpoint.member-2.com") + require.NoError(s.T(), err) + s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, "smith"), ca) + assert.Equal(s.T(), "smith2", ca.Username()) + }) + + s.Run("cluster access correct when using workspace context", func() { + // when + ca, err := svc.GetClusterAccess("789-ready", "", "smith2", "tekton-results", publicViewerEnabled) // workspace-context specified + + // then + require.NoError(s.T(), err) + require.NotNil(s.T(), ca) + expectedURL, err := url.Parse("https://myservice.endpoint.member-2.com") + require.NoError(s.T(), err) + s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, "smith"), ca) + assert.Equal(s.T(), "smith2", ca.Username()) + + s.Run("another workspace on another cluster", func() { + // when + mC := commontest.NewFakeClient(s.T()) + mC.MockGet = func(ctx context.Context, key client.ObjectKey, obj client.Object, opts 
...client.GetOption) error { + route, ok := obj.(*routev1.Route) + if ok && key.Namespace == "tekton-results" && key.Name == "tekton-results" { + route.Namespace = key.Namespace + route.Name = key.Name + route.Status.Ingress = []routev1.RouteIngress{ + { + Host: "api.endpoint.member-1.com:6443", + }, + } + return nil + } + return memberClient.Client.Get(ctx, key, obj, opts...) + } + memberArray[0].Client = mC + ca, err := svc.GetClusterAccess("789-ready", "", "teamspace", "tekton-results", publicViewerEnabled) // workspace-context specified + + // then + require.NoError(s.T(), err) + require.NotNil(s.T(), ca) + expectedURL, err := url.Parse("https://api.endpoint.member-1.com:6443") + require.NoError(s.T(), err) + s.assertClusterAccess(access.NewClusterAccess(*expectedURL, "def456", "smith"), ca) + assert.Equal(s.T(), "smith2", ca.Username()) + }) + }) + }) + + s.Run("verify cluster access no route", func() { + memberClient.MockGet = nil + expectedToken := "abc123" // should match member 2 bearer token + + // when + ca, err := svc.GetClusterAccess("789-ready", "", "", "", publicViewerEnabled) + + // then + require.NoError(s.T(), err) + require.NotNil(s.T(), ca) + expectedURL, err := url.Parse("https://api.endpoint.member-2.com:6443") + require.NoError(s.T(), err) + assert.Equal(s.T(), "smith2", ca.Username()) + + s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, ""), ca) + + s.Run("cluster access correct when username provided", func() { + // when + ca, err := svc.GetClusterAccess("", "smith@", "", "", publicViewerEnabled) + + // then + require.NoError(s.T(), err) + require.NotNil(s.T(), ca) + expectedURL, err := url.Parse("https://api.endpoint.member-2.com:6443") + require.NoError(s.T(), err) + s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, "smith"), ca) + assert.Equal(s.T(), "smith2", ca.Username()) + }) + + s.Run("cluster access correct when using workspace context", func() { + // when + ca, err := 
svc.GetClusterAccess("789-ready", "", "smith2", "", publicViewerEnabled) // workspace-context specified + + // then + require.NoError(s.T(), err) + require.NotNil(s.T(), ca) + expectedURL, err := url.Parse("https://api.endpoint.member-2.com:6443") + require.NoError(s.T(), err) + s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, "smith"), ca) + assert.Equal(s.T(), "smith2", ca.Username()) + + s.Run("another workspace on another cluster", func() { + // when + ca, err := svc.GetClusterAccess("789-ready", "", "teamspace", "", publicViewerEnabled) // workspace-context specified + + // then + require.NoError(s.T(), err) + require.NotNil(s.T(), ca) + expectedURL, err := url.Parse("https://api.endpoint.member-1.com:6443") + require.NoError(s.T(), err) + s.assertClusterAccess(access.NewClusterAccess(*expectedURL, "def456", "smith"), ca) + assert.Equal(s.T(), "smith2", ca.Username()) + }) + }) }) }) }) + } - s.Run("verify cluster access no route", func() { - memberClient.MockGet = nil - expectedToken := "abc123" // should match member 2 bearer token - + // public-viewer specific tests + s.Run("user is public-viewer", func() { + s.Run("has no default workspace", func() { // when - ca, err := svc.GetClusterAccess("789-ready", "", "", "") + ca, err := svc.GetClusterAccess("", toolchainv1alpha1.KubesawAuthenticatedUsername, "", "", true) // then - require.NoError(s.T(), err) - require.NotNil(s.T(), ca) - expectedURL, err := url.Parse("https://api.endpoint.member-2.com:6443") - require.NoError(s.T(), err) - assert.Equal(s.T(), "smith2", ca.Username()) + require.EqualError(s.T(), err, "user is not provisioned (yet)") + require.Nil(s.T(), ca) + }) - s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, ""), ca) + s.Run("get workspace by name", func() { + svc := service.NewMemberClusterService( + fake.MemberClusterServiceContext{ + Client: s, + Svcs: s.Application, + }, + func(si *service.ServiceImpl) { + si.GetMembersFunc = func(_ 
...commoncluster.Condition) []*commoncluster.CachedToolchainCluster { + return s.memberClusters() + } + }, + ) - s.Run("cluster access correct when username provided", func() { + s.Run("public-viewer is disabled", func() { // when - ca, err := svc.GetClusterAccess("", "smith@", "", "") + ca, err := svc.GetClusterAccess("", toolchainv1alpha1.KubesawAuthenticatedUsername, "smith2", "", false) // then - require.NoError(s.T(), err) - require.NotNil(s.T(), ca) + require.EqualError(s.T(), err, "user is not provisioned (yet)") + require.Nil(s.T(), ca) + }) + + s.Run("ready space", func() { + //given expectedURL, err := url.Parse("https://api.endpoint.member-2.com:6443") require.NoError(s.T(), err) - s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, "smith"), ca) - assert.Equal(s.T(), "smith2", ca.Username()) - }) + expectedClusterAccess := access.NewClusterAccess(*expectedURL, "token", toolchainv1alpha1.KubesawAuthenticatedUsername) - s.Run("cluster access correct when using workspace context", func() { // when - ca, err := svc.GetClusterAccess("789-ready", "", "smith2", "") // workspace-context specified + clusterAccess, err := svc.GetClusterAccess("", toolchainv1alpha1.KubesawAuthenticatedUsername, "smith2", "", true) // then require.NoError(s.T(), err) - require.NotNil(s.T(), ca) - expectedURL, err := url.Parse("https://api.endpoint.member-2.com:6443") - require.NoError(s.T(), err) - s.assertClusterAccess(access.NewClusterAccess(*expectedURL, expectedToken, "smith"), ca) - assert.Equal(s.T(), "smith2", ca.Username()) + require.Equal(s.T(), expectedClusterAccess, clusterAccess) + }) - s.Run("another workspace on another cluster", func() { - // when - ca, err := svc.GetClusterAccess("789-ready", "", "teamspace", "") // workspace-context specified + s.Run("not-available space", func() { + // when + clusterAccess, err := svc.GetClusterAccess("", toolchainv1alpha1.KubesawAuthenticatedUsername, "456-not-ready", "", true) - // then - 
require.NoError(s.T(), err) - require.NotNil(s.T(), ca) - expectedURL, err := url.Parse("https://api.endpoint.member-1.com:6443") - require.NoError(s.T(), err) - s.assertClusterAccess(access.NewClusterAccess(*expectedURL, "def456", "smith"), ca) - assert.Equal(s.T(), "smith2", ca.Username()) - }) + // then + require.EqualError(s.T(), err, "the requested space is not available") + require.Nil(s.T(), clusterAccess) + }) + + s.Run("ready space with unknown cluster", func() { + // when + clusterAccess, err := svc.GetClusterAccess("", toolchainv1alpha1.KubesawAuthenticatedUsername, "012-ready-unknown-cluster", "", true) + + // then + require.EqualError(s.T(), err, "the requested space is not available") + require.Nil(s.T(), clusterAccess) }) }) }) @@ -441,6 +533,9 @@ func (s *TestClusterServiceSuite) memberClusters() []*commoncluster.CachedToolch Name: clusterName, APIEndpoint: fmt.Sprintf("https://api.endpoint.%s.com:6443", clusterName), OperatorNamespace: "member-operator", + RestConfig: &rest.Config{ + BearerToken: "token", + }, }, Client: nil, }) diff --git a/test/fake/proxy.go b/test/fake/proxy.go index bad15e29..3b539a66 100644 --- a/test/fake/proxy.go +++ b/test/fake/proxy.go @@ -45,7 +45,7 @@ type fakeClusterService struct { fakeApp *ProxyFakeApp } -func (f *fakeClusterService) GetClusterAccess(userID, _, _, _ string) (*access.ClusterAccess, error) { +func (f *fakeClusterService) GetClusterAccess(userID, _, _, _ string, _ bool) (*access.ClusterAccess, error) { return f.fakeApp.Accesses[userID], f.fakeApp.Err }