diff --git a/.github/ISSUE_TEMPLATE/testplan.md b/.github/ISSUE_TEMPLATE/testplan.md
index 870e9592e8b31..e117b348cd13a 100644
--- a/.github/ISSUE_TEMPLATE/testplan.md
+++ b/.github/ISSUE_TEMPLATE/testplan.md
@@ -979,10 +979,14 @@ manualy testing.
   - [ ] Self-hosted MariaDB.
   - [ ] Self-hosted MongoDB.
   - [ ] Self-hosted CockroachDB.
-  - [ ] Self-hosted Redis.
+  - [ ] Self-hosted Redis/Valkey.
   - [ ] Self-hosted Redis Cluster.
   - [ ] Self-hosted MSSQL.
   - [ ] Self-hosted MSSQL with PKINIT authentication.
+  - [ ] Self-hosted Elasticsearch.
+  - [ ] Self-hosted Cassandra/ScyllaDB.
+  - [ ] Self-hosted Oracle.
+  - [ ] Self-hosted ClickHouse.
   - [ ] AWS Aurora Postgres.
   - [ ] AWS Aurora MySQL.
     - [ ] MySQL server version reported by Teleport is correct.
@@ -992,32 +996,36 @@ manualy testing.
     - [ ] Verify connection to external AWS account works with `assume_role_arn: ""` and `external_id: ""`
   - [ ] AWS ElastiCache.
   - [ ] AWS MemoryDB.
+  - [ ] AWS OpenSearch.
+  - [ ] AWS DynamoDB.
+    - [ ] Verify connection to external AWS account works with `assume_role_arn: ""` and `external_id: ""`
+  - [ ] AWS DocumentDB.
+  - [ ] AWS Keyspaces.
+    - [ ] Verify connection to external AWS account works with `assume_role_arn: ""` and `external_id: ""`
   - [ ] GCP Cloud SQL Postgres.
   - [ ] GCP Cloud SQL MySQL.
   - [ ] GCP Cloud Spanner.
-  - [ ] Snowflake.
   - [ ] Azure Cache for Redis.
   - [x] Azure single-server MySQL and Postgres (EOL Sep 2024 and Mar 2025, skip)
-  - [ ] Azure flexible-server MySQL and Postgres
-  - [ ] Elasticsearch.
-  - [ ] OpenSearch.
-  - [ ] Cassandra/ScyllaDB.
-    - [ ] Verify connection to external AWS account works with `assume_role_arn: ""` and `external_id: ""`
-  - [ ] Dynamodb.
-    - [ ] Verify connection to external AWS account works with `assume_role_arn: ""` and `external_id: ""`
+  - [ ] Azure flexible-server MySQL
+  - [ ] Azure flexible-server Postgres
   - [ ] Azure SQL Server.
-  - [ ] Oracle.
-  - [ ] ClickHouse.
+  - [ ] Snowflake.
+  - [ ] MongoDB Atlas.
 - [ ] Connect to a database within a remote cluster via a trusted cluster.
   - [ ] Self-hosted Postgres.
   - [ ] Self-hosted MySQL.
   - [ ] Self-hosted MariaDB.
   - [ ] Self-hosted MongoDB.
   - [ ] Self-hosted CockroachDB.
-  - [ ] Self-hosted Redis.
+  - [ ] Self-hosted Redis/Valkey.
   - [ ] Self-hosted Redis Cluster.
   - [ ] Self-hosted MSSQL.
   - [ ] Self-hosted MSSQL with PKINIT authentication.
+  - [ ] Self-hosted Elasticsearch.
+  - [ ] Self-hosted Cassandra/ScyllaDB.
+  - [ ] Self-hosted Oracle.
+  - [ ] Self-hosted ClickHouse.
   - [ ] AWS Aurora Postgres.
   - [ ] AWS Aurora MySQL.
   - [ ] AWS RDS Proxy (MySQL, Postgres, MariaDB, or SQL Server)
@@ -1025,20 +1033,20 @@ manualy testing.
   - [ ] AWS Redshift Serverless.
   - [ ] AWS ElastiCache.
   - [ ] AWS MemoryDB.
+  - [ ] AWS OpenSearch.
+  - [ ] AWS DynamoDB.
+  - [ ] AWS DocumentDB.
+  - [ ] AWS Keyspaces.
   - [ ] GCP Cloud SQL Postgres.
   - [ ] GCP Cloud SQL MySQL.
   - [ ] GCP Cloud Spanner.
-  - [ ] Snowflake.
   - [ ] Azure Cache for Redis.
   - [x] Azure single-server MySQL and Postgres (EOL Sep 2024 and Mar 2025, skip)
-  - [ ] Azure flexible-server MySQL and Postgres
-  - [ ] Elasticsearch.
-  - [ ] OpenSearch.
-  - [ ] Cassandra/ScyllaDB.
-  - [ ] Dynamodb.
+  - [ ] Azure flexible-server MySQL
+  - [ ] Azure flexible-server Postgres
   - [ ] Azure SQL Server.
-  - [ ] Oracle.
-  - [ ] ClickHouse.
+  - [ ] Snowflake.
+  - [ ] MongoDB Atlas.
 - [ ] Verify auto user provisioning.
   Verify all supported modes: `keep`, `best_effort_drop`
   - [ ] Self-hosted Postgres.
@@ -1084,6 +1092,7 @@ manualy testing.
   - [ ] Can detect and register ElastiCache Redis clusters.
   - [ ] Can detect and register MemoryDB clusters.
   - [ ] Can detect and register OpenSearch domains.
+  - [ ] Can detect and register DocumentDB clusters.
 - [ ] Azure
   - [ ] Can detect and register MySQL and Postgres single-server instances.
   - [ ] Can detect and register MySQL and Postgres flexible-server instances.
@@ -1098,6 +1107,11 @@ manualy testing.
   - [ ] Verify searching for all columns in the search bar works
   - [ ] Verify you can sort by all columns except `labels`
 - [ ] `tsh bench` load tests (instructions on Notion -> Database Access -> Load test)
+- [ ] Verify database session player
+  - [ ] Web UI
+    - [ ] Postgres
+  - [ ] `tsh play`
+    - [ ] Postgres
 
 ## TLS Routing
 
@@ -1574,13 +1588,21 @@ Docs: [IP Pinning](https://goteleport.com/docs/access-controls/guides/ip-pinning
 - [ ] Verify that users can run custom audit queries.
 - [ ] Verify that the Privileged Access Report is generated and periodically refreshed.
 
-- [ ] Access List
+- [ ] Access Lists
   - [ ] Verify Access List membership/ownership/expiration date.
-  - [ ] Verify permissions granted by Access List membership.
-  - [ ] Verify permissions granted by Access List ownership.
-  - [ ] Verify Access List Review.
-  - [ ] verify Access LIst Promotion.
-  - [ ] Verify that owners can only add/remove members and not change other properties.
+  - [ ] Verify permissions granted by Access List membership.
+  - [ ] Verify permissions granted by Access List ownership.
+  - [ ] Verify Access List Review.
+  - [ ] Verify Access List Promotion.
+  - [ ] Verify that owners can only add/remove members and not change other properties.
+  - [ ] Nested Access Lists
+    - [ ] Verify that Access Lists can be added as members or owners of other Access Lists.
+    - [ ] Verify that member grants from ancestor lists are inherited by members of nested Access Lists added as members.
+    - [ ] Verify that owner grants from ancestor lists are inherited by members of nested Access Lists added as owners.
+    - [ ] Verify that Access List Review and Promotion work with nested Access Lists.
+    - [ ] Verify that manually deleting a nested Access List used as a member or owner does not break UserLoginState generation or listing Access Lists.
+    - [ ] Verify that an Access List can be added as a member or owner of another Access List using `tctl`.
+    - [ ] Verify that Access Lists added as members or owners of other Access Lists using `tctl` are validated (no circular references, no nesting > 10 levels).
 
 - [ ] Verify Okta Sync Service
   - [ ] Verify Okta Plugin configuration.
@@ -1590,6 +1612,7 @@ Docs: [IP Pinning](https://goteleport.com/docs/access-controls/guides/ip-pinning
   - [ ] Verify that users/apps/groups are synced from Okta to Teleport.
   - [ ] Verify the custom `okta_import_rule` rule configuration.
   - [ ] Verify that users/apps/groups are displayed in the Teleport Web UI.
+  - [ ] Verify that users/groups are flattened on import and are not duplicated on sync when their membership is inherited via nested Access Lists.
   - [ ] Verify that a user is locked/removed from Teleport when the user is Suspended/Deactivated in Okta.
   - [ ] Verify access to Okta apps granted by access_list/access_request.
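For the repeated `assume_role_arn: ""` / `external_id: ""` checks in the database section above, the items exercise a `db_service` entry along these lines. This is a minimal sketch with hypothetical values (database name, account ID, role ARN, external ID); the field layout follows the Teleport database access configuration reference:

```yaml
db_service:
  enabled: true
  databases:
    - name: "dynamodb-example"        # hypothetical database name
      protocol: "dynamodb"
      aws:
        region: "us-east-1"
        account_id: "123456789012"    # the external AWS account being accessed
        assume_role_arn: "arn:aws:iam::123456789012:role/example-dynamodb-access"
        external_id: "example-external-id"
```

The same `aws` block should apply to the Keyspaces and OpenSearch items, with `protocol` adjusted accordingly.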
diff --git a/.github/ISSUE_TEMPLATE/webtestplan.md b/.github/ISSUE_TEMPLATE/webtestplan.md
index 96b15dd065414..595a7955f2ddb 100644
--- a/.github/ISSUE_TEMPLATE/webtestplan.md
+++ b/.github/ISSUE_TEMPLATE/webtestplan.md
@@ -574,6 +574,45 @@ With the previous role you created from `Strategy Reason`, change `request_acces
 
 - [ ] Verify after login, dashboard is rendered as normal
 
+## Access Lists
+
+Not available for OSS
+
+- Creating new Access List:
+  - [ ] Verify that traits/roles are not required in order to create
+  - [ ] Verify that one can be created with members and owners
+  - [ ] Verify the web cache is updated (new list should appear under "Access Lists" page without reloading)
+- Deleting existing Access List:
+  - [ ] Verify the web cache is updated (deleted list should disappear from "Access Lists" page without reloading)
+  - [ ] Verify that an Access List used as a member or owner in other lists cannot be deleted (should show a warning)
+- Reviewing Access List:
+  - [ ] Verify that after reviewing, the web cache is updated (list cards should show any member/role changes)
+- Updating (renaming, removing members, adding members):
+  - [ ] Verify the web cache is updated (changes to name/members appear under "Access Lists" page without reloading)
+- [ ] Verify Access List search is preserved between sub-route navigation (clicking into a specific List and navigating back)
+- Can manage members/owners for an existing Access List:
+  - [ ] Verify that existing Users:
+    - [ ] Can be enrolled as members and owners
+    - [ ] Enrolled as members or owners can be removed
+  - [ ] Verify that existing Access Lists:
+    - [ ] Can be enrolled as members and owners
+    - [ ] Enrolled as members or owners can be removed
+  - [ ] Verify that an Access List cannot be added as a member or owner:
+    - [ ] If it is already a member or owner
+    - [ ] If it would result in a circular reference (ACL A -> ACL B -> ACL A)
+    - [ ] If the depth of the inheritance would exceed 10 levels
+    - [ ] If it includes yourself (and you lack RBAC)
+  - [ ] Verify that non-existing Members and Owners can be enrolled in an existing List (e.g., SSO users)
+- Inherited grants are properly calculated and displayed:
+  - [ ] Verify that members of a nested Access List:
+    - [ ] Added as a member to another Access List inherit its Member grants
+    - [ ] Added as an owner to another Access List inherit its Owner grants
+    - [ ] That do not meet Membership Requirements in a Nested List do not inherit any Grants from Parent Lists
+    - [ ] That do not meet the Parent List's Membership/Ownership Requirements do not inherit its Member/Owner Grants
+  - [ ] Verify that owners of Access Lists added as Members/Owners to other Access Lists do *not* inherit any Grants
+  - [ ] Verify that inherited grants are updated on reload or navigating away from / back to Access List View/Edit route
+  - [ ] Verify that 'View More' exists and can be clicked under the 'Inherited Member Grants' section if inherited grants overflow the container
+
 ## Web Terminal (aka console)
 
 - [ ] Verify that top nav has a user menu (Main and Logout)
diff --git a/.github/workflows/doc-tests.yaml b/.github/workflows/doc-tests.yaml
index 272c2de479c5c..3a3e12365306b 100644
--- a/.github/workflows/doc-tests.yaml
+++ b/.github/workflows/doc-tests.yaml
@@ -47,9 +47,28 @@ jobs:
           repository: "gravitational/docs"
           path: "docs"
 
-      - name: Prepare docs site configuration
+      # Cache node_modules.
Unlike the example in the actions/cache repo, this + # caches the node_modules directory instead of the yarn cache. This is + # because yarn needs to build fresh packages even when it copies files + # from the yarn cache into node_modules. + # See: + # https://github.com/actions/cache/blob/main/examples.md#node---yarn + - uses: actions/cache@v4 + id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`) + with: + path: '${{ github.workspace }}/docs/node_modules' + key: ${{ runner.os }}-yarn-${{ hashFiles(format('{0}/docs/yarn.lock', github.workspace)) }} + restore-keys: | + ${{ runner.os }}-yarn- + + - name: Install docs site dependencies + working-directory: docs + if: ${{ steps.yarn-cache.outputs.cache-hit != 'true' }} # Prevent occasional `yarn install` executions that run indefinitely timeout-minutes: 10 + run: yarn install + + - name: Prepare docs site configuration # The environment we use for linting the docs differs from the one we # use for the live docs site in that we only test a single version of # the content. @@ -85,7 +104,6 @@ jobs: git submodule add --force -b $BRANCH -- https://github.com/gravitational/teleport cd $GITHUB_WORKSPACE/docs echo "{\"versions\": [{\"name\": \"teleport\", \"branch\": \"$BRANCH\", \"deprecated\": false}]}" > $GITHUB_WORKSPACE/docs/config.json - yarn install yarn build-node - name: Check spelling @@ -95,7 +113,8 @@ jobs: run: cd $GITHUB_WORKSPACE/docs && yarn markdown-lint - name: Test the docs build - run: cd $GITHUB_WORKSPACE/docs && yarn install && yarn build + working-directory: docs + run: yarn build stylecheck: name: Lint docs prose style diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 2c31d0ef7ec92..860aeefeaecf9 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -63,7 +63,6 @@ jobs: - 'docs/pages/admin-guides/**' - 'docs/pages/enroll-resources/**' - 'docs/pages/reference/operator-resources/**' - - 'docs/pages/reference/terraform-provider.mdx' - 'docs/pages/reference/terraform-provider/**' - 'examples/chart/teleport-cluster/charts/teleport-operator/operator-crds' diff --git a/Makefile b/Makefile index a6cf23c23e277..82e210bbad320 100644 --- a/Makefile +++ b/Makefile @@ -820,10 +820,6 @@ RERUN := $(TOOLINGDIR)/bin/rerun $(RERUN): $(wildcard $(TOOLINGDIR)/cmd/rerun/*.go) cd $(TOOLINGDIR) && go build -o "$@" ./cmd/rerun -RELEASE_NOTES_GEN := $(TOOLINGDIR)/bin/release-notes -$(RELEASE_NOTES_GEN): $(wildcard $(TOOLINGDIR)/cmd/release-notes/*.go) - cd $(TOOLINGDIR) && go build -o "$@" ./cmd/release-notes - .PHONY: tooling tooling: ensure-gotestsum $(DIFF_TEST) @@ -1822,11 +1818,13 @@ changelog: # does not match version set it will fail to create a release. If tag doesn't exist it # will also fail to create a release. 
 #
-# For more information on release notes generation see ./build.assets/tooling/cmd/release-notes
+# For more information on release notes generation see:
+# https://github.com/gravitational/shared-workflows/tree/gus/release-notes/tools/release-notes#readme
+RELEASE_NOTES_GEN = go run github.com/gravitational/shared-workflows/tools/release-notes@latest
 .PHONY: create-github-release
 create-github-release: LATEST = false
 create-github-release: GITHUB_RELEASE_LABELS = ""
-create-github-release: $(RELEASE_NOTES_GEN)
+create-github-release:
	@NOTES=$$($(RELEASE_NOTES_GEN) --labels=$(GITHUB_RELEASE_LABELS) $(VERSION) CHANGELOG.md) && gh release create v$(VERSION) \
	-t "Teleport $(VERSION)" \
	--latest=$(LATEST) \
diff --git a/api/client/client.go b/api/client/client.go
index ee1ea80087791..5f24aa66b9fc3 100644
--- a/api/client/client.go
+++ b/api/client/client.go
@@ -664,6 +664,9 @@ type Config struct {
 	// MFAPromptConstructor is used to create MFA prompts when needed.
 	// If nil, the client will not prompt for MFA.
 	MFAPromptConstructor mfa.PromptConstructor
+	// SSOMFACeremonyConstructor is used to handle SSO MFA when needed.
+	// If nil, the client will not be able to perform SSO MFA.
+	SSOMFACeremonyConstructor mfa.SSOMFACeremonyConstructor
 }
 
 // CheckAndSetDefaults checks and sets default config values.
@@ -730,6 +733,11 @@ func (c *Client) SetMFAPromptConstructor(pc mfa.PromptConstructor) {
 	c.c.MFAPromptConstructor = pc
 }
 
+// SetSSOMFACeremonyConstructor sets the SSO MFA ceremony constructor for this client.
+func (c *Client) SetSSOMFACeremonyConstructor(scc mfa.SSOMFACeremonyConstructor) {
+	c.c.SSOMFACeremonyConstructor = scc
+}
+
 // Close closes the Client connection to the auth server.
 func (c *Client) Close() error {
 	if c.setClosed() && c.conn != nil {
diff --git a/api/client/dynamicwindows/dynamicwindows.go b/api/client/dynamicwindows/dynamicwindows.go
index 32ba1762f1aed..19d89e619fac8 100644
--- a/api/client/dynamicwindows/dynamicwindows.go
+++ b/api/client/dynamicwindows/dynamicwindows.go
@@ -85,9 +85,29 @@ func (c *Client) UpdateDynamicWindowsDesktop(ctx context.Context, desktop types.
 	}
 }
 
+// UpsertDynamicWindowsDesktop creates a dynamic Windows desktop, or updates an
+// existing one if a desktop with the same name is already registered.
+func (c *Client) UpsertDynamicWindowsDesktop(ctx context.Context, desktop types.DynamicWindowsDesktop) (types.DynamicWindowsDesktop, error) {
+	switch desktop := desktop.(type) {
+	case *types.DynamicWindowsDesktopV1:
+		desktop, err := c.grpcClient.UpsertDynamicWindowsDesktop(ctx, &dynamicwindows.UpsertDynamicWindowsDesktopRequest{
+			Desktop: desktop,
+		})
+		return desktop, trace.Wrap(err)
+	default:
+		return nil, trace.BadParameter("unknown desktop type: %T", desktop)
+	}
+}
+
 func (c *Client) DeleteDynamicWindowsDesktop(ctx context.Context, name string) error {
 	_, err := c.grpcClient.DeleteDynamicWindowsDesktop(ctx, &dynamicwindows.DeleteDynamicWindowsDesktopRequest{
 		Name: name,
 	})
 	return trace.Wrap(err)
 }
+
+// DeleteAllDynamicWindowsDesktops is not implemented in the gRPC client and
+// always returns trace.NotImplemented.
+func (c *Client) DeleteAllDynamicWindowsDesktops(ctx context.Context) error {
+	return trace.NotImplemented("DeleteAllDynamicWindowsDesktops is not supported in the gRPC client")
+}
diff --git a/api/client/mfa.go b/api/client/mfa.go
index beba5b20c79dd..8db9af2b318f0 100644
--- a/api/client/mfa.go
+++ b/api/client/mfa.go
@@ -30,6 +30,7 @@ func (c *Client) PerformMFACeremony(ctx context.Context, challengeRequest *proto
 	mfaCeremony := &mfa.Ceremony{
 		CreateAuthenticateChallenge: c.CreateAuthenticateChallenge,
 		PromptConstructor:           c.c.MFAPromptConstructor,
+		SSOMFACeremonyConstructor:   c.c.SSOMFACeremonyConstructor,
 	}
 
 	return mfaCeremony.Run(ctx, challengeRequest, promptOpts...)
 }
diff --git a/api/client/webclient/webclient.go b/api/client/webclient/webclient.go
index 95ae0ea9747c3..b5c684ebfb628 100644
--- a/api/client/webclient/webclient.go
+++ b/api/client/webclient/webclient.go
@@ -68,6 +68,9 @@ type Config struct {
 	Timeout time.Duration
 	// TraceProvider is used to retrieve a Tracer for creating spans
 	TraceProvider oteltrace.TracerProvider
+	// UpdateGroup is used to vary the webapi response based on the
+	// client's auto-update group.
+	UpdateGroup string
 }
 
 // CheckAndSetDefaults checks and sets defaults
@@ -169,9 +172,18 @@ func Find(cfg *Config) (*PingResponse, error) {
 	ctx, span := cfg.TraceProvider.Tracer("webclient").Start(cfg.Context, "webclient/Find")
 	defer span.End()
 
-	endpoint := fmt.Sprintf("https://%s/webapi/find", cfg.ProxyAddr)
+	endpoint := &url.URL{
+		Scheme: "https",
+		Host:   cfg.ProxyAddr,
+		Path:   "/webapi/find",
+	}
+	if cfg.UpdateGroup != "" {
+		endpoint.RawQuery = url.Values{
+			"group": []string{cfg.UpdateGroup},
+		}.Encode()
+	}
 
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint.String(), nil)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
@@ -205,12 +217,21 @@ func Ping(cfg *Config) (*PingResponse, error) {
 	ctx, span := cfg.TraceProvider.Tracer("webclient").Start(cfg.Context, "webclient/Ping")
 	defer span.End()
 
-	endpoint := fmt.Sprintf("https://%s/webapi/ping", cfg.ProxyAddr)
+	endpoint := &url.URL{
+		Scheme: "https",
+		Host:   cfg.ProxyAddr,
+		Path:   "/webapi/ping",
+	}
+	if cfg.UpdateGroup != "" {
+		endpoint.RawQuery = url.Values{
+			"group": []string{cfg.UpdateGroup},
+		}.Encode()
+	}
 	if cfg.ConnectorName != "" {
-		endpoint = fmt.Sprintf("%s/%s", endpoint, cfg.ConnectorName)
+		endpoint = endpoint.JoinPath(cfg.ConnectorName)
 	}
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint.String(), nil)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
@@ -305,6 +326,10 @@ type PingResponse struct {
 	// reserved: license_warnings ([]string)
 	// AutomaticUpgrades describes whether agents should automatically upgrade.
 	AutomaticUpgrades bool `json:"automatic_upgrades"`
+	// Edition represents the Teleport edition. Possible values are "oss", "ent", and "community".
+	Edition string `json:"edition"`
+	// FIPS indicates whether Teleport is using FIPS-compliant cryptography.
+	FIPS bool `json:"fips"`
 }
 
 // PingErrorResponse contains the error from /webapi/ping.
@@ -336,6 +361,12 @@ type AutoUpdateSettings struct {
 	ToolsVersion string `json:"tools_version"`
 	// ToolsMode defines mode client auto update feature `enabled|disabled`.
 	ToolsMode string `json:"tools_mode"`
+	// AgentVersion defines the version of Teleport that agents enrolled into autoupdates should run.
+	AgentVersion string `json:"agent_version"`
+	// AgentAutoUpdate indicates whether the requesting agent should attempt to update now.
+	AgentAutoUpdate bool `json:"agent_auto_update"`
+	// AgentUpdateJitterSeconds defines the jitter time an agent should wait before updating.
+ AgentUpdateJitterSeconds int `json:"agent_update_jitter_seconds"` } // KubeProxySettings is kubernetes proxy settings diff --git a/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service.pb.go b/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service.pb.go index b09748485f162..3523ac1e79d57 100644 --- a/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service.pb.go +++ b/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service.pb.go @@ -200,7 +200,7 @@ func (x *GetDynamicWindowsDesktopRequest) GetName() string { return "" } -// CreateDynamicWindowsDesktopRequest is a request for a specific dynamic Windows desktop. +// CreateDynamicWindowsDesktopRequest is used for creating new dynamic Windows desktops. type CreateDynamicWindowsDesktopRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -247,7 +247,7 @@ func (x *CreateDynamicWindowsDesktopRequest) GetDesktop() *types.DynamicWindowsD return nil } -// UpdateDynamicWindowsDesktopRequest is a request for a specific dynamic Windows desktop. +// UpdateDynamicWindowsDesktopRequest is used for updating existing dynamic Windows desktops. type UpdateDynamicWindowsDesktopRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -294,6 +294,53 @@ func (x *UpdateDynamicWindowsDesktopRequest) GetDesktop() *types.DynamicWindowsD return nil } +// UpsertDynamicWindowsDesktopRequest is used for upserting dynamic Windows desktops. +type UpsertDynamicWindowsDesktopRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // desktop to be upserted + Desktop *types.DynamicWindowsDesktopV1 `protobuf:"bytes,1,opt,name=desktop,proto3" json:"desktop,omitempty"` +} + +func (x *UpsertDynamicWindowsDesktopRequest) Reset() { + *x = UpsertDynamicWindowsDesktopRequest{} + mi := &file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpsertDynamicWindowsDesktopRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpsertDynamicWindowsDesktopRequest) ProtoMessage() {} + +func (x *UpsertDynamicWindowsDesktopRequest) ProtoReflect() protoreflect.Message { + mi := &file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpsertDynamicWindowsDesktopRequest.ProtoReflect.Descriptor instead. +func (*UpsertDynamicWindowsDesktopRequest) Descriptor() ([]byte, []int) { + return file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_rawDescGZIP(), []int{5} +} + +func (x *UpsertDynamicWindowsDesktopRequest) GetDesktop() *types.DynamicWindowsDesktopV1 { + if x != nil { + return x.Desktop + } + return nil +} + // DeleteDynamicWindowsDesktopRequest is a request to delete a Windows desktop host. 
type DeleteDynamicWindowsDesktopRequest struct { state protoimpl.MessageState @@ -306,7 +353,7 @@ type DeleteDynamicWindowsDesktopRequest struct { func (x *DeleteDynamicWindowsDesktopRequest) Reset() { *x = DeleteDynamicWindowsDesktopRequest{} - mi := &file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_msgTypes[5] + mi := &file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -318,7 +365,7 @@ func (x *DeleteDynamicWindowsDesktopRequest) String() string { func (*DeleteDynamicWindowsDesktopRequest) ProtoMessage() {} func (x *DeleteDynamicWindowsDesktopRequest) ProtoReflect() protoreflect.Message { - mi := &file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_msgTypes[5] + mi := &file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -331,7 +378,7 @@ func (x *DeleteDynamicWindowsDesktopRequest) ProtoReflect() protoreflect.Message // Deprecated: Use DeleteDynamicWindowsDesktopRequest.ProtoReflect.Descriptor instead. func (*DeleteDynamicWindowsDesktopRequest) Descriptor() ([]byte, []int) { - return file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_rawDescGZIP(), []int{5} + return file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_rawDescGZIP(), []int{6} } func (x *DeleteDynamicWindowsDesktopRequest) GetName() string { @@ -383,11 +430,17 @@ var file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_rawDesc = []byt 0x0a, 0x07, 0x64, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x56, 0x31, 0x52, + 0x07, 0x64, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x22, 0x5e, 0x0a, 0x22, 0x55, 0x70, 0x73, 0x65, + 0x72, 0x74, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, + 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, + 0x0a, 0x07, 0x64, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, + 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x56, 0x31, 0x52, 0x07, 0x64, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x22, 0x38, 0x0a, 0x22, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x32, 0xa3, 0x05, 0x0a, 0x15, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, + 0x6d, 0x65, 0x32, 0xa2, 0x06, 0x0a, 0x15, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x9b, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x73, 0x12, 0x3d, 0x2e, 0x74, 0x65, @@ -422,21 +475,29 @@ var file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_rawDesc = []byt 0x6f, 0x77, 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x1e, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x56, - 0x31, 0x12, 0x75, 0x0a, 0x1b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x31, 0x12, 0x7d, 0x0a, 0x1b, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x12, 0x3e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, + 0x6d, 0x69, 0x63, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, + 0x73, 0x65, 0x72, 0x74, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x60, 0x5a, 0x5e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x74, - 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x77, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x1a, 0x1e, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x56, 0x31, + 0x12, 0x75, 0x0a, 0x1b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x12, + 0x3e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x73, 0x44, 0x65, 0x73, 0x6b, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x60, 0x5a, 0x5e, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x74, 0x65, + 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x77, 0x69, + 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -451,36 +512,40 @@ func file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_rawDescGZIP() return 
file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_rawDescData } -var file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_goTypes = []any{ (*ListDynamicWindowsDesktopsRequest)(nil), // 0: teleport.dynamicwindows.v1.ListDynamicWindowsDesktopsRequest (*ListDynamicWindowsDesktopsResponse)(nil), // 1: teleport.dynamicwindows.v1.ListDynamicWindowsDesktopsResponse (*GetDynamicWindowsDesktopRequest)(nil), // 2: teleport.dynamicwindows.v1.GetDynamicWindowsDesktopRequest (*CreateDynamicWindowsDesktopRequest)(nil), // 3: teleport.dynamicwindows.v1.CreateDynamicWindowsDesktopRequest (*UpdateDynamicWindowsDesktopRequest)(nil), // 4: teleport.dynamicwindows.v1.UpdateDynamicWindowsDesktopRequest - (*DeleteDynamicWindowsDesktopRequest)(nil), // 5: teleport.dynamicwindows.v1.DeleteDynamicWindowsDesktopRequest - (*types.DynamicWindowsDesktopV1)(nil), // 6: types.DynamicWindowsDesktopV1 - (*emptypb.Empty)(nil), // 7: google.protobuf.Empty + (*UpsertDynamicWindowsDesktopRequest)(nil), // 5: teleport.dynamicwindows.v1.UpsertDynamicWindowsDesktopRequest + (*DeleteDynamicWindowsDesktopRequest)(nil), // 6: teleport.dynamicwindows.v1.DeleteDynamicWindowsDesktopRequest + (*types.DynamicWindowsDesktopV1)(nil), // 7: types.DynamicWindowsDesktopV1 + (*emptypb.Empty)(nil), // 8: google.protobuf.Empty } var file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_depIdxs = []int32{ - 6, // 0: teleport.dynamicwindows.v1.ListDynamicWindowsDesktopsResponse.desktops:type_name -> types.DynamicWindowsDesktopV1 - 6, // 1: teleport.dynamicwindows.v1.CreateDynamicWindowsDesktopRequest.desktop:type_name -> types.DynamicWindowsDesktopV1 - 6, // 2: teleport.dynamicwindows.v1.UpdateDynamicWindowsDesktopRequest.desktop:type_name -> types.DynamicWindowsDesktopV1 - 0, // 3: teleport.dynamicwindows.v1.DynamicWindowsService.ListDynamicWindowsDesktops:input_type -> teleport.dynamicwindows.v1.ListDynamicWindowsDesktopsRequest - 2, // 4: teleport.dynamicwindows.v1.DynamicWindowsService.GetDynamicWindowsDesktop:input_type -> teleport.dynamicwindows.v1.GetDynamicWindowsDesktopRequest - 3, // 5: teleport.dynamicwindows.v1.DynamicWindowsService.CreateDynamicWindowsDesktop:input_type -> teleport.dynamicwindows.v1.CreateDynamicWindowsDesktopRequest - 4, // 6: teleport.dynamicwindows.v1.DynamicWindowsService.UpdateDynamicWindowsDesktop:input_type -> teleport.dynamicwindows.v1.UpdateDynamicWindowsDesktopRequest - 5, // 7: teleport.dynamicwindows.v1.DynamicWindowsService.DeleteDynamicWindowsDesktop:input_type -> teleport.dynamicwindows.v1.DeleteDynamicWindowsDesktopRequest - 1, // 8: teleport.dynamicwindows.v1.DynamicWindowsService.ListDynamicWindowsDesktops:output_type -> teleport.dynamicwindows.v1.ListDynamicWindowsDesktopsResponse - 6, // 9: teleport.dynamicwindows.v1.DynamicWindowsService.GetDynamicWindowsDesktop:output_type -> types.DynamicWindowsDesktopV1 - 6, // 10: teleport.dynamicwindows.v1.DynamicWindowsService.CreateDynamicWindowsDesktop:output_type -> types.DynamicWindowsDesktopV1 - 6, // 11: teleport.dynamicwindows.v1.DynamicWindowsService.UpdateDynamicWindowsDesktop:output_type -> types.DynamicWindowsDesktopV1 - 7, // 12: teleport.dynamicwindows.v1.DynamicWindowsService.DeleteDynamicWindowsDesktop:output_type -> google.protobuf.Empty - 8, // [8:13] is the sub-list for method output_type - 3, // 
[3:8] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 7, // 0: teleport.dynamicwindows.v1.ListDynamicWindowsDesktopsResponse.desktops:type_name -> types.DynamicWindowsDesktopV1 + 7, // 1: teleport.dynamicwindows.v1.CreateDynamicWindowsDesktopRequest.desktop:type_name -> types.DynamicWindowsDesktopV1 + 7, // 2: teleport.dynamicwindows.v1.UpdateDynamicWindowsDesktopRequest.desktop:type_name -> types.DynamicWindowsDesktopV1 + 7, // 3: teleport.dynamicwindows.v1.UpsertDynamicWindowsDesktopRequest.desktop:type_name -> types.DynamicWindowsDesktopV1 + 0, // 4: teleport.dynamicwindows.v1.DynamicWindowsService.ListDynamicWindowsDesktops:input_type -> teleport.dynamicwindows.v1.ListDynamicWindowsDesktopsRequest + 2, // 5: teleport.dynamicwindows.v1.DynamicWindowsService.GetDynamicWindowsDesktop:input_type -> teleport.dynamicwindows.v1.GetDynamicWindowsDesktopRequest + 3, // 6: teleport.dynamicwindows.v1.DynamicWindowsService.CreateDynamicWindowsDesktop:input_type -> teleport.dynamicwindows.v1.CreateDynamicWindowsDesktopRequest + 4, // 7: teleport.dynamicwindows.v1.DynamicWindowsService.UpdateDynamicWindowsDesktop:input_type -> teleport.dynamicwindows.v1.UpdateDynamicWindowsDesktopRequest + 5, // 8: teleport.dynamicwindows.v1.DynamicWindowsService.UpsertDynamicWindowsDesktop:input_type -> teleport.dynamicwindows.v1.UpsertDynamicWindowsDesktopRequest + 6, // 9: teleport.dynamicwindows.v1.DynamicWindowsService.DeleteDynamicWindowsDesktop:input_type -> teleport.dynamicwindows.v1.DeleteDynamicWindowsDesktopRequest + 1, // 10: teleport.dynamicwindows.v1.DynamicWindowsService.ListDynamicWindowsDesktops:output_type -> teleport.dynamicwindows.v1.ListDynamicWindowsDesktopsResponse + 7, // 11: teleport.dynamicwindows.v1.DynamicWindowsService.GetDynamicWindowsDesktop:output_type -> types.DynamicWindowsDesktopV1 + 7, // 12: teleport.dynamicwindows.v1.DynamicWindowsService.CreateDynamicWindowsDesktop:output_type -> types.DynamicWindowsDesktopV1 + 7, // 13: teleport.dynamicwindows.v1.DynamicWindowsService.UpdateDynamicWindowsDesktop:output_type -> types.DynamicWindowsDesktopV1 + 7, // 14: teleport.dynamicwindows.v1.DynamicWindowsService.UpsertDynamicWindowsDesktop:output_type -> types.DynamicWindowsDesktopV1 + 8, // 15: teleport.dynamicwindows.v1.DynamicWindowsService.DeleteDynamicWindowsDesktop:output_type -> google.protobuf.Empty + 10, // [10:16] is the sub-list for method output_type + 4, // [4:10] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_init() } @@ -494,7 +559,7 @@ func file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_teleport_dynamicwindows_v1_dynamicwindows_service_proto_rawDesc, NumEnums: 0, - NumMessages: 6, + NumMessages: 7, NumExtensions: 0, NumServices: 1, }, diff --git a/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service_grpc.pb.go b/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service_grpc.pb.go index 62e0fb6429b26..83d36000f707f 100644 --- a/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service_grpc.pb.go +++ b/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service_grpc.pb.go @@ -42,6 
+42,7 @@ const (
 	DynamicWindowsService_GetDynamicWindowsDesktop_FullMethodName    = "/teleport.dynamicwindows.v1.DynamicWindowsService/GetDynamicWindowsDesktop"
 	DynamicWindowsService_CreateDynamicWindowsDesktop_FullMethodName = "/teleport.dynamicwindows.v1.DynamicWindowsService/CreateDynamicWindowsDesktop"
 	DynamicWindowsService_UpdateDynamicWindowsDesktop_FullMethodName = "/teleport.dynamicwindows.v1.DynamicWindowsService/UpdateDynamicWindowsDesktop"
+	DynamicWindowsService_UpsertDynamicWindowsDesktop_FullMethodName = "/teleport.dynamicwindows.v1.DynamicWindowsService/UpsertDynamicWindowsDesktop"
 	DynamicWindowsService_DeleteDynamicWindowsDesktop_FullMethodName = "/teleport.dynamicwindows.v1.DynamicWindowsService/DeleteDynamicWindowsDesktop"
 )
 
@@ -59,6 +60,8 @@ type DynamicWindowsServiceClient interface {
 	CreateDynamicWindowsDesktop(ctx context.Context, in *CreateDynamicWindowsDesktopRequest, opts ...grpc.CallOption) (*types.DynamicWindowsDesktopV1, error)
 	// UpdateDynamicWindowsDesktop updates an existing dynamic Windows desktop.
 	UpdateDynamicWindowsDesktop(ctx context.Context, in *UpdateDynamicWindowsDesktopRequest, opts ...grpc.CallOption) (*types.DynamicWindowsDesktopV1, error)
+	// UpsertDynamicWindowsDesktop updates an existing dynamic Windows desktop or creates a new one if it doesn't exist.
+	UpsertDynamicWindowsDesktop(ctx context.Context, in *UpsertDynamicWindowsDesktopRequest, opts ...grpc.CallOption) (*types.DynamicWindowsDesktopV1, error)
 	// DeleteDynamicWindowsDesktop removes the specified dynamic Windows desktop.
 	DeleteDynamicWindowsDesktop(ctx context.Context, in *DeleteDynamicWindowsDesktopRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
 }
@@ -111,6 +114,16 @@ func (c *dynamicWindowsServiceClient) UpdateDynamicWindowsDesktop(ctx context.Co
 	return out, nil
 }
 
+func (c *dynamicWindowsServiceClient) UpsertDynamicWindowsDesktop(ctx context.Context, in *UpsertDynamicWindowsDesktopRequest, opts ...grpc.CallOption) (*types.DynamicWindowsDesktopV1, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	out := new(types.DynamicWindowsDesktopV1)
+	err := c.cc.Invoke(ctx, DynamicWindowsService_UpsertDynamicWindowsDesktop_FullMethodName, in, out, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 func (c *dynamicWindowsServiceClient) DeleteDynamicWindowsDesktop(ctx context.Context, in *DeleteDynamicWindowsDesktopRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
 	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
 	out := new(emptypb.Empty)
@@ -135,6 +148,8 @@ type DynamicWindowsServiceServer interface {
 	CreateDynamicWindowsDesktop(context.Context, *CreateDynamicWindowsDesktopRequest) (*types.DynamicWindowsDesktopV1, error)
 	// UpdateDynamicWindowsDesktop updates an existing dynamic Windows desktop.
 	UpdateDynamicWindowsDesktop(context.Context, *UpdateDynamicWindowsDesktopRequest) (*types.DynamicWindowsDesktopV1, error)
+	// UpsertDynamicWindowsDesktop updates an existing dynamic Windows desktop or creates a new one if it doesn't exist.
+	UpsertDynamicWindowsDesktop(context.Context, *UpsertDynamicWindowsDesktopRequest) (*types.DynamicWindowsDesktopV1, error)
 	// DeleteDynamicWindowsDesktop removes the specified dynamic Windows desktop.
DeleteDynamicWindowsDesktop(context.Context, *DeleteDynamicWindowsDesktopRequest) (*emptypb.Empty, error) mustEmbedUnimplementedDynamicWindowsServiceServer() @@ -159,6 +174,9 @@ func (UnimplementedDynamicWindowsServiceServer) CreateDynamicWindowsDesktop(cont func (UnimplementedDynamicWindowsServiceServer) UpdateDynamicWindowsDesktop(context.Context, *UpdateDynamicWindowsDesktopRequest) (*types.DynamicWindowsDesktopV1, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateDynamicWindowsDesktop not implemented") } +func (UnimplementedDynamicWindowsServiceServer) UpsertDynamicWindowsDesktop(context.Context, *UpsertDynamicWindowsDesktopRequest) (*types.DynamicWindowsDesktopV1, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpsertDynamicWindowsDesktop not implemented") +} func (UnimplementedDynamicWindowsServiceServer) DeleteDynamicWindowsDesktop(context.Context, *DeleteDynamicWindowsDesktopRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteDynamicWindowsDesktop not implemented") } @@ -255,6 +273,24 @@ func _DynamicWindowsService_UpdateDynamicWindowsDesktop_Handler(srv interface{}, return interceptor(ctx, in, info, handler) } +func _DynamicWindowsService_UpsertDynamicWindowsDesktop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpsertDynamicWindowsDesktopRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DynamicWindowsServiceServer).UpsertDynamicWindowsDesktop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DynamicWindowsService_UpsertDynamicWindowsDesktop_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DynamicWindowsServiceServer).UpsertDynamicWindowsDesktop(ctx, req.(*UpsertDynamicWindowsDesktopRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _DynamicWindowsService_DeleteDynamicWindowsDesktop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteDynamicWindowsDesktopRequest) if err := dec(in); err != nil { @@ -296,6 +332,10 @@ var DynamicWindowsService_ServiceDesc = grpc.ServiceDesc{ MethodName: "UpdateDynamicWindowsDesktop", Handler: _DynamicWindowsService_UpdateDynamicWindowsDesktop_Handler, }, + { + MethodName: "UpsertDynamicWindowsDesktop", + Handler: _DynamicWindowsService_UpsertDynamicWindowsDesktop_Handler, + }, { MethodName: "DeleteDynamicWindowsDesktop", Handler: _DynamicWindowsService_DeleteDynamicWindowsDesktop_Handler, diff --git a/api/mfa/ceremony.go b/api/mfa/ceremony.go index 67b55e8fea379..3b28162e62164 100644 --- a/api/mfa/ceremony.go +++ b/api/mfa/ceremony.go @@ -18,6 +18,7 @@ package mfa import ( "context" + "slices" "github.com/gravitational/trace" @@ -31,8 +32,21 @@ type Ceremony struct { CreateAuthenticateChallenge CreateAuthenticateChallengeFunc // PromptConstructor creates a prompt to prompt the user to solve an authentication challenge. PromptConstructor PromptConstructor + // SSOMFACeremonyConstructor is an optional SSO MFA ceremony constructor. If provided, + // the MFA ceremony will also attempt to retrieve an SSO MFA challenge. + SSOMFACeremonyConstructor SSOMFACeremonyConstructor } +// SSOMFACeremony is an SSO MFA ceremony. 
+type SSOMFACeremony interface { + GetClientCallbackURL() string + Run(ctx context.Context, chal *proto.MFAAuthenticateChallenge) (*proto.MFAAuthenticateResponse, error) + Close() +} + +// SSOMFACeremonyConstructor constructs a new SSO MFA ceremony. +type SSOMFACeremonyConstructor func(ctx context.Context) (SSOMFACeremony, error) + // CreateAuthenticateChallengeFunc is a function that creates an authentication challenge. type CreateAuthenticateChallengeFunc func(ctx context.Context, req *proto.CreateAuthenticateChallengeRequest) (*proto.MFAAuthenticateChallenge, error) @@ -53,6 +67,19 @@ func (c *Ceremony) Run(ctx context.Context, req *proto.CreateAuthenticateChallen return nil, trace.BadParameter("mfa challenge scope must be specified") } + // If available, prepare an SSO MFA ceremony and set the client redirect URL in the challenge + // request to request an SSO challenge in addition to other challenges. + if c.SSOMFACeremonyConstructor != nil { + ssoMFACeremony, err := c.SSOMFACeremonyConstructor(ctx) + if err != nil { + return nil, trace.Wrap(err, "failed to handle SSO MFA ceremony") + } + defer ssoMFACeremony.Close() + + req.SSOClientRedirectURL = ssoMFACeremony.GetClientCallbackURL() + promptOpts = append(promptOpts, withSSOMFACeremony(ssoMFACeremony)) + } + chal, err := c.CreateAuthenticateChallenge(ctx, req) if err != nil { // CreateAuthenticateChallenge returns a bad parameter error when the client @@ -74,6 +101,12 @@ func (c *Ceremony) Run(ctx context.Context, req *proto.CreateAuthenticateChallen return nil, trace.Wrap(&ErrMFANotSupported, "mfa ceremony must have PromptConstructor set in order to succeed") } + // Set challenge extensions in the prompt, if present, but set it first so the + // caller can still override it. + if req != nil && req.ChallengeExtensions != nil { + promptOpts = slices.Insert(promptOpts, 0, WithPromptChallengeExtensions(req.ChallengeExtensions)) + } + resp, err := c.PromptConstructor(promptOpts...).Run(ctx, chal) return resp, trace.Wrap(err) } diff --git a/api/mfa/ceremony_test.go b/api/mfa/ceremony_test.go index 7d94fd4de5327..d29b03a22487e 100644 --- a/api/mfa/ceremony_test.go +++ b/api/mfa/ceremony_test.go @@ -23,13 +23,14 @@ import ( "github.com/gravitational/trace" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/client/proto" mfav1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/mfa/v1" "github.com/gravitational/teleport/api/mfa" ) -func TestPerformMFACeremony(t *testing.T) { +func TestMFACeremony(t *testing.T) { t.Parallel() ctx := context.Background() @@ -128,3 +129,77 @@ func TestPerformMFACeremony(t *testing.T) { }) } } + +func TestMFACeremony_SSO(t *testing.T) { + t.Parallel() + ctx := context.Background() + + testMFAChallenge := &proto.MFAAuthenticateChallenge{ + SSOChallenge: &proto.SSOChallenge{ + RedirectUrl: "redirect", + RequestId: "request-id", + }, + } + testMFAResponse := &proto.MFAAuthenticateResponse{ + Response: &proto.MFAAuthenticateResponse_SSO{ + SSO: &proto.SSOResponse{ + Token: "token", + RequestId: "request-id", + }, + }, + } + + ssoMFACeremony := &mfa.Ceremony{ + CreateAuthenticateChallenge: func(ctx context.Context, req *proto.CreateAuthenticateChallengeRequest) (*proto.MFAAuthenticateChallenge, error) { + return testMFAChallenge, nil + }, + PromptConstructor: func(opts ...mfa.PromptOpt) mfa.Prompt { + cfg := new(mfa.PromptConfig) + for _, opt := range opts { + opt(cfg) + } + + return mfa.PromptFunc(func(ctx context.Context, chal 
*proto.MFAAuthenticateChallenge) (*proto.MFAAuthenticateResponse, error) { + if cfg.SSOMFACeremony == nil { + return nil, trace.BadParameter("expected sso mfa ceremony") + } + + return cfg.SSOMFACeremony.Run(ctx, chal) + }) + }, + SSOMFACeremonyConstructor: func(ctx context.Context) (mfa.SSOMFACeremony, error) { + return &mockSSOMFACeremony{ + clientCallbackURL: "client-redirect", + prompt: func(ctx context.Context, chal *proto.MFAAuthenticateChallenge) (*proto.MFAAuthenticateResponse, error) { + return testMFAResponse, nil + }, + }, nil + }, + } + + resp, err := ssoMFACeremony.Run(ctx, &proto.CreateAuthenticateChallengeRequest{ + ChallengeExtensions: &mfav1.ChallengeExtensions{ + Scope: mfav1.ChallengeScope_CHALLENGE_SCOPE_ADMIN_ACTION, + }, + MFARequiredCheck: &proto.IsMFARequiredRequest{}, + }) + require.NoError(t, err) + require.Equal(t, testMFAResponse, resp) +} + +type mockSSOMFACeremony struct { + clientCallbackURL string + prompt mfa.PromptFunc +} + +// GetClientCallbackURL returns the client callback URL. +func (m *mockSSOMFACeremony) GetClientCallbackURL() string { + return m.clientCallbackURL +} + +// Run the SSO MFA ceremony. +func (m *mockSSOMFACeremony) Run(ctx context.Context, chal *proto.MFAAuthenticateChallenge) (*proto.MFAAuthenticateResponse, error) { + return m.prompt(ctx, chal) +} + +func (m *mockSSOMFACeremony) Close() {} diff --git a/api/mfa/prompt.go b/api/mfa/prompt.go index e139ff561fa64..9efd9fcb1fe44 100644 --- a/api/mfa/prompt.go +++ b/api/mfa/prompt.go @@ -21,6 +21,7 @@ import ( "fmt" "github.com/gravitational/teleport/api/client/proto" + mfav1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/mfa/v1" ) // Prompt is an MFA prompt. @@ -50,6 +51,11 @@ type PromptConfig struct { DeviceType DeviceDescriptor // Quiet suppresses users prompts. Quiet bool + // Extensions are the challenge extensions used to create the prompt's challenge. + // Used to enrich certain prompts. + Extensions *mfav1.ChallengeExtensions + // SSOMFACeremony is an SSO MFA ceremony. + SSOMFACeremony SSOMFACeremony } // DeviceDescriptor is a descriptor for a device, such as "registered". @@ -83,7 +89,18 @@ func WithPromptReasonAdminAction() PromptOpt { // WithPromptReasonSessionMFA sets the prompt's PromptReason field to a standard session mfa message. func WithPromptReasonSessionMFA(serviceType, serviceName string) PromptOpt { - return WithPromptReason(fmt.Sprintf("MFA is required to access %s %q", serviceType, serviceName)) + return func(cfg *PromptConfig) { + cfg.PromptReason = fmt.Sprintf("MFA is required to access %s %q", serviceType, serviceName) + + // Set the extensions to scope USER_SESSION, which we know is true, but + // don't override any explicitly-set extensions (as they are likely more + // complete). + if cfg.Extensions == nil { + cfg.Extensions = &mfav1.ChallengeExtensions{ + Scope: mfav1.ChallengeScope_CHALLENGE_SCOPE_USER_SESSION, + } + } + } } // WithPromptDeviceType sets the prompt's DeviceType field. @@ -92,3 +109,20 @@ func WithPromptDeviceType(deviceType DeviceDescriptor) PromptOpt { cfg.DeviceType = deviceType } } + +// WithPromptChallengeExtensions sets the challenge extensions used to create +// the prompt's challenge. +// While not mandatory, informing the prompt of the extensions used allows for +// better user messaging. +func WithPromptChallengeExtensions(exts *mfav1.ChallengeExtensions) PromptOpt { + return func(cfg *PromptConfig) { + cfg.Extensions = exts + } +} + +// withSSOMFACeremony sets the SSO MFA ceremony for the MFA prompt. 
+func withSSOMFACeremony(ssoMFACeremony SSOMFACeremony) PromptOpt {
+	return func(cfg *PromptConfig) {
+		cfg.SSOMFACeremony = ssoMFACeremony
+	}
+}
diff --git a/api/proto/teleport/dynamicwindows/v1/dynamicwindows_service.proto b/api/proto/teleport/dynamicwindows/v1/dynamicwindows_service.proto
index 63c899c6cbd2d..718cfb5145611 100644
--- a/api/proto/teleport/dynamicwindows/v1/dynamicwindows_service.proto
+++ b/api/proto/teleport/dynamicwindows/v1/dynamicwindows_service.proto
@@ -35,6 +35,8 @@ service DynamicWindowsService {
   rpc CreateDynamicWindowsDesktop(CreateDynamicWindowsDesktopRequest) returns (types.DynamicWindowsDesktopV1);
   // UpdateDynamicWindowsDesktop updates an existing dynamic Windows desktop.
   rpc UpdateDynamicWindowsDesktop(UpdateDynamicWindowsDesktopRequest) returns (types.DynamicWindowsDesktopV1);
+  // UpsertDynamicWindowsDesktop updates an existing dynamic Windows desktop or creates a new one if it doesn't exist.
+  rpc UpsertDynamicWindowsDesktop(UpsertDynamicWindowsDesktopRequest) returns (types.DynamicWindowsDesktopV1);
   // DeleteDynamicWindowsDesktop removes the specified dynamic Windows desktop.
   rpc DeleteDynamicWindowsDesktop(DeleteDynamicWindowsDesktopRequest) returns (google.protobuf.Empty);
 }
@@ -63,18 +65,24 @@ message GetDynamicWindowsDesktopRequest {
   string name = 1;
 }
 
-// CreateDynamicWindowsDesktopRequest is a request for a specific dynamic Windows desktop.
+// CreateDynamicWindowsDesktopRequest is used for creating new dynamic Windows desktops.
 message CreateDynamicWindowsDesktopRequest {
   // desktop to be created
   types.DynamicWindowsDesktopV1 desktop = 1;
 }
 
-// UpdateDynamicWindowsDesktopRequest is a request for a specific dynamic Windows desktop.
+// UpdateDynamicWindowsDesktopRequest is used for updating existing dynamic Windows desktops.
 message UpdateDynamicWindowsDesktopRequest {
   // desktop to be updated
   types.DynamicWindowsDesktopV1 desktop = 1;
 }
 
+// UpsertDynamicWindowsDesktopRequest is used for upserting dynamic Windows desktops.
+message UpsertDynamicWindowsDesktopRequest {
+  // desktop to be upserted
+  types.DynamicWindowsDesktopV1 desktop = 1;
+}
+
 // DeleteDynamicWindowsDesktopRequest is a request to delete a Windows desktop host.
 message DeleteDynamicWindowsDesktopRequest {
   // name is the name of the Windows desktop host.
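To make the new RPC concrete, here is a minimal usage sketch of the `api/client/dynamicwindows` wrapper added earlier in this diff. The desktop name and address are hypothetical, and it assumes the `types.NewDynamicWindowsDesktopV1` constructor and the spec's `Addr` field from the existing dynamic Windows desktop types:

```go
package example

import (
	"context"

	"github.com/gravitational/teleport/api/client/dynamicwindows"
	"github.com/gravitational/teleport/api/types"
	"github.com/gravitational/trace"
)

// upsertExampleDesktop registers a desktop, or refreshes it if a desktop
// with the same name is already present.
func upsertExampleDesktop(ctx context.Context, dwClient *dynamicwindows.Client) error {
	desktop, err := types.NewDynamicWindowsDesktopV1("example-host", nil, types.DynamicWindowsDesktopSpecV1{
		Addr: "10.0.0.5:3389", // hypothetical RDP address
	})
	if err != nil {
		return trace.Wrap(err)
	}

	// Unlike Create (which fails if the desktop already exists) and Update
	// (which fails if it doesn't), Upsert succeeds in both cases.
	if _, err := dwClient.UpsertDynamicWindowsDesktop(ctx, desktop); err != nil {
		return trace.Wrap(err)
	}
	return nil
}
```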
diff --git a/api/types/autoupdate/rollout_test.go b/api/types/autoupdate/rollout_test.go index cce4dc8495d83..66c1b705d1568 100644 --- a/api/types/autoupdate/rollout_test.go +++ b/api/types/autoupdate/rollout_test.go @@ -41,7 +41,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) { spec: &autoupdate.AutoUpdateAgentRolloutSpec{ StartVersion: "1.2.3", TargetVersion: "2.3.4-dev", - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, AutoupdateMode: AgentsUpdateModeEnabled, Strategy: AgentsStrategyHaltOnError, }, @@ -57,7 +57,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) { Spec: &autoupdate.AutoUpdateAgentRolloutSpec{ StartVersion: "1.2.3", TargetVersion: "2.3.4-dev", - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, AutoupdateMode: AgentsUpdateModeEnabled, Strategy: AgentsStrategyHaltOnError, }, @@ -74,7 +74,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) { name: "missing start version", spec: &autoupdate.AutoUpdateAgentRolloutSpec{ TargetVersion: "2.3.4-dev", - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, AutoupdateMode: AgentsUpdateModeEnabled, Strategy: AgentsStrategyHaltOnError, }, @@ -87,7 +87,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) { spec: &autoupdate.AutoUpdateAgentRolloutSpec{ StartVersion: "1.2.3", TargetVersion: "2-3-4", - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, AutoupdateMode: AgentsUpdateModeEnabled, Strategy: AgentsStrategyHaltOnError, }, @@ -100,7 +100,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) { spec: &autoupdate.AutoUpdateAgentRolloutSpec{ StartVersion: "1.2.3", TargetVersion: "2.3.4-dev", - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, AutoupdateMode: "invalid-mode", Strategy: AgentsStrategyHaltOnError, }, @@ -126,7 +126,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) { spec: &autoupdate.AutoUpdateAgentRolloutSpec{ StartVersion: "1.2.3", TargetVersion: "2.3.4-dev", - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, AutoupdateMode: AgentsUpdateModeEnabled, Strategy: "invalid-strategy", }, diff --git a/api/types/autoupdate/utils.go b/api/types/autoupdate/utils.go index 4772ff8a94411..30658c80d71ec 100644 --- a/api/types/autoupdate/utils.go +++ b/api/types/autoupdate/utils.go @@ -51,8 +51,10 @@ func checkToolsMode(mode string) error { func checkScheduleName(schedule string) error { switch schedule { - case AgentsScheduleRegular, AgentsScheduleImmediate: + case AgentsScheduleImmediate: return nil + case AgentsScheduleRegular: + return trace.BadParameter("regular schedule is not implemented yet") default: return trace.BadParameter("unsupported schedule type: %q", schedule) } diff --git a/api/types/autoupdate/version_test.go b/api/types/autoupdate/version_test.go index a59a4f6fe6c22..793d7d6a2a145 100644 --- a/api/types/autoupdate/version_test.go +++ b/api/types/autoupdate/version_test.go @@ -94,7 +94,7 @@ func TestNewAutoUpdateVersion(t *testing.T) { Agents: &autoupdate.AutoUpdateVersionSpecAgents{ StartVersion: "1.2.3-dev.1", TargetVersion: "1.2.3-dev.2", - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, Mode: AgentsUpdateModeEnabled, }, }, @@ -111,7 +111,7 @@ func TestNewAutoUpdateVersion(t *testing.T) { Agents: &autoupdate.AutoUpdateVersionSpecAgents{ StartVersion: "1.2.3-dev.1", TargetVersion: "1.2.3-dev.2", - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, Mode: AgentsUpdateModeEnabled, }, }, @@ -124,7 +124,7 @@ func TestNewAutoUpdateVersion(t 
*testing.T) { StartVersion: "", TargetVersion: "1.2.3", Mode: AgentsUpdateModeEnabled, - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, }, }, assertErr: func(t *testing.T, err error, a ...any) { @@ -138,7 +138,7 @@ func TestNewAutoUpdateVersion(t *testing.T) { StartVersion: "1.2.3-dev", TargetVersion: "", Mode: AgentsUpdateModeEnabled, - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, }, }, assertErr: func(t *testing.T, err error, a ...any) { @@ -152,7 +152,7 @@ func TestNewAutoUpdateVersion(t *testing.T) { StartVersion: "17-0-0", TargetVersion: "1.2.3", Mode: AgentsUpdateModeEnabled, - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, }, }, assertErr: func(t *testing.T, err error, a ...any) { @@ -166,7 +166,7 @@ func TestNewAutoUpdateVersion(t *testing.T) { StartVersion: "1.2.3", TargetVersion: "17-0-0", Mode: AgentsUpdateModeEnabled, - Schedule: AgentsScheduleRegular, + Schedule: AgentsScheduleImmediate, }, }, assertErr: func(t *testing.T, err error, a ...any) { diff --git a/api/types/trustedcluster.go b/api/types/trustedcluster.go index 7e233c864c826..27d8129f70cfe 100644 --- a/api/types/trustedcluster.go +++ b/api/types/trustedcluster.go @@ -22,6 +22,8 @@ import ( "time" "github.com/gravitational/trace" + + "github.com/gravitational/teleport/api/utils" ) // TrustedCluster holds information needed for a cluster that can not be directly @@ -60,6 +62,8 @@ type TrustedCluster interface { SetReverseTunnelAddress(string) // CanChangeStateTo checks the TrustedCluster can transform into another. CanChangeStateTo(TrustedCluster) error + // Clone returns a deep copy of the TrustedCluster. + Clone() TrustedCluster } // NewTrustedCluster is a convenience way to create a TrustedCluster resource. @@ -259,6 +263,10 @@ func (c *TrustedClusterV2) CanChangeStateTo(t TrustedCluster) error { return nil } +func (c *TrustedClusterV2) Clone() TrustedCluster { + return utils.CloneProtoMsg(c) +} + // String represents a human readable version of trusted cluster settings. func (c *TrustedClusterV2) String() string { return fmt.Sprintf("TrustedCluster(Enabled=%v,Roles=%v,Token=%v,ProxyAddress=%v,ReverseTunnelAddress=%v)", diff --git a/api/utils/entraid/federation_metadata.go b/api/utils/entraid/federation_metadata.go new file mode 100644 index 0000000000000..2dfa76080cdeb --- /dev/null +++ b/api/utils/entraid/federation_metadata.go @@ -0,0 +1,33 @@ +/* +Copyright 2024 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package entraid + +import ( + "net/url" + "path" +) + +// FederationMetadataURL returns the URL for the federation metadata endpoint +func FederationMetadataURL(tenantID, appID string) string { + return (&url.URL{ + Scheme: "https", + Host: "login.microsoftonline.com", + Path: path.Join(tenantID, "federationmetadata", "2007-06", "federationmetadata.xml"), + RawQuery: url.Values{ + "appid": {appID}, + }.Encode(), + }).String() +} diff --git a/assets/aws/files/system/teleport-proxy-acm.service b/assets/aws/files/system/teleport-proxy-acm.service index 757c9219e4b0d..ca5e913a081a6 100644 --- a/assets/aws/files/system/teleport-proxy-acm.service +++ b/assets/aws/files/system/teleport-proxy-acm.service @@ -11,8 +11,6 @@ Restart=always RestartSec=5 RuntimeDirectory=teleport EnvironmentFile=-/etc/default/teleport -# TODO(gus): REMOVE IN 17.0.0 - /etc/default/teleport should be used instead -EnvironmentFile=/etc/teleport.d/conf ExecStartPre=/usr/local/bin/teleport-ssm-get-token ExecStart=/usr/local/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3000 --pid-file=/run/teleport/teleport.pid # systemd before 239 needs an absolute path diff --git a/assets/aws/files/system/teleport-proxy.service b/assets/aws/files/system/teleport-proxy.service index 8fe4ba5985ce6..3c31de24b1178 100644 --- a/assets/aws/files/system/teleport-proxy.service +++ b/assets/aws/files/system/teleport-proxy.service @@ -11,8 +11,6 @@ Restart=always RestartSec=5 RuntimeDirectory=teleport EnvironmentFile=-/etc/default/teleport -# TODO(gus): REMOVE IN 17.0.0 - /etc/default/teleport should be used instead -EnvironmentFile=/etc/teleport.d/conf ExecStartPre=/usr/local/bin/teleport-ssm-get-token ExecStartPre=/bin/aws s3 sync s3://${TELEPORT_S3_BUCKET}/live/${TELEPORT_DOMAIN_NAME} /var/lib/teleport ExecStart=/usr/local/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3000 --pid-file=/run/teleport/teleport.pid diff --git a/build.assets/tooling/cmd/release-notes/README.md b/build.assets/tooling/cmd/release-notes/README.md deleted file mode 100644 index 1a8c8e41f09f4..0000000000000 --- a/build.assets/tooling/cmd/release-notes/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# release-notes - -A release notes generator for Teleport releases. - -## Usage - -```shell -usage: release-notes - - -Flags: - --[no-]help Show context-sensitive help (also try --help-long and --help-man). - -Args: - Version to be released - Path to CHANGELOG.md -``` - -This script is expected to be run along side the `gh` CLI to create a release. - -```shell -release-notes $VERSION CHANGELOG.md | gh release create \ - -t "Teleport $VERSION" \ - --latest=false \ - --target=$BRANCH \ - --verify-tag \ - -F - \ - -``` \ No newline at end of file diff --git a/build.assets/tooling/cmd/release-notes/main.go b/build.assets/tooling/cmd/release-notes/main.go deleted file mode 100644 index 8ec06e4c43c93..0000000000000 --- a/build.assets/tooling/cmd/release-notes/main.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Teleport - * Copyright (C) 2024 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package main - -import ( - "fmt" - "log" - "os" - - "github.com/alecthomas/kingpin/v2" -) - -var ( - version = kingpin.Arg("version", "Version to be released").Required().String() - changelog = kingpin.Arg("changelog", "Path to CHANGELOG.md").Required().String() - labels = kingpin.Flag("labels", "Labels to apply to the end of a release, e.g. security labels").String() -) - -func main() { - kingpin.Parse() - - clFile, err := os.Open(*changelog) - if err != nil { - log.Fatal(err) - } - defer clFile.Close() - - gen := &releaseNotesGenerator{ - releaseVersion: *version, - labels: *labels, - } - - notes, err := gen.generateReleaseNotes(clFile) - if err != nil { - log.Fatal(err) - } - fmt.Println(notes) -} diff --git a/build.assets/tooling/cmd/release-notes/release_notes.go b/build.assets/tooling/cmd/release-notes/release_notes.go deleted file mode 100644 index 6795efc841dbb..0000000000000 --- a/build.assets/tooling/cmd/release-notes/release_notes.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Teleport - * Copyright (C) 2024 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package main - -import ( - "bufio" - "bytes" - _ "embed" - "fmt" - "html/template" - "io" - "strings" - - "github.com/gravitational/trace" -) - -//go:embed template/release-notes.md.tmpl -var tmpl string - -type tmplInfo struct { - Version string - Description string - Labels string -} - -var ( - releaseNotesTemplate = template.Must(template.New("release notes").Parse(tmpl)) -) - -type releaseNotesGenerator struct { - // releaseVersion is the version for the release. - // This will be compared against the version present in the changelog. - releaseVersion string - // labels is a string applied to the end of the release description - // that will be picked up by other automation. - // - // It won't be validated but it is expected to be a comma separated list of - // entries in the format - // label=key - labels string -} - -func (r *releaseNotesGenerator) generateReleaseNotes(md io.Reader) (string, error) { - desc, err := r.parseMD(md) - if err != nil { - return "", err - } - - info := tmplInfo{ - Version: r.releaseVersion, - Description: desc, - Labels: r.labels, - } - var buff bytes.Buffer - if err := releaseNotesTemplate.Execute(&buff, info); err != nil { - return "", trace.Wrap(err) - } - return buff.String(), nil -} - -// parseMD is a simple implementation of a parser to extract the description from a changelog. -// Will scan for the first double header and pull the version from that. -// Will pull all information between the first and second double header for the description. 
-func (r *releaseNotesGenerator) parseMD(md io.Reader) (string, error) { - sc := bufio.NewScanner(md) - - // Extract the first second-level heading - var heading string - for sc.Scan() { - if strings.HasPrefix(sc.Text(), "## ") { - heading = strings.TrimSpace(strings.TrimPrefix(sc.Text(), "## ")) - break - } - } - if err := sc.Err(); err != nil { - return "", trace.Wrap(err) - } - if heading == "" { - return "", trace.BadParameter("no second-level heading found in changelog") - } - - // Expected heading would be something like "16.0.4 (MM/DD/YY)" - parts := strings.SplitN(heading, " ", 2) - if parts[0] != r.releaseVersion { - return "", trace.BadParameter("changelog version number did not match expected version number: %q != %q", parts[0], r.releaseVersion) - } - - // Write everything until next header to buffer - var buff bytes.Buffer - for sc.Scan() && !strings.HasPrefix(sc.Text(), "## ") { - if _, err := fmt.Fprintln(&buff, sc.Text()); err != nil { - return "", trace.Wrap(err) - } - } - if err := sc.Err(); err != nil { - return "", trace.Wrap(err) - } - - return strings.TrimSpace(buff.String()), nil -} diff --git a/build.assets/tooling/cmd/release-notes/release_notes_test.go b/build.assets/tooling/cmd/release-notes/release_notes_test.go deleted file mode 100644 index 67af99d28ce9c..0000000000000 --- a/build.assets/tooling/cmd/release-notes/release_notes_test.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Teleport - * Copyright (C) 2024 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -package main - -import ( - _ "embed" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func Test_generateReleaseNotes(t *testing.T) { - tests := []struct { - name string - releaseVersion string - labels string - clFile *os.File - want string - wantErr bool - }{ - { - name: "happy path", - releaseVersion: "16.0.1", - clFile: mustOpen(t, "test-changelog.md"), - want: mustRead(t, "expected-release-notes.md"), - wantErr: false, - }, - { - name: "with labels", - releaseVersion: "16.0.1", - labels: "security-patch=yes, security-patch-alts=v16.0.0,v16.0.1", - clFile: mustOpen(t, "test-changelog.md"), - want: mustRead(t, "expected-with-labels.md"), - wantErr: false, - }, - { - name: "version mismatch", - releaseVersion: "15.0.1", // test-changelog has 16.0.1 - clFile: mustOpen(t, "test-changelog.md"), - want: "", - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := &releaseNotesGenerator{ - releaseVersion: tt.releaseVersion, - labels: tt.labels, - } - - got, err := r.generateReleaseNotes(tt.clFile) - if tt.wantErr { - assert.Error(t, err) - return - } - assert.NoError(t, err) - assert.Equal(t, tt.want, got) - }) - } -} - -func mustOpen(t *testing.T, filename string) *os.File { - testfile, err := os.Open(filepath.Join("testdata", filename)) - require.NoError(t, err) - return testfile -} - -func mustRead(t *testing.T, filename string) string { - expectedReleaseNotes, err := os.ReadFile(filepath.Join("testdata", filename)) - require.NoError(t, err) - return string(expectedReleaseNotes) -} diff --git a/build.assets/tooling/cmd/release-notes/template/release-notes.md.tmpl b/build.assets/tooling/cmd/release-notes/template/release-notes.md.tmpl deleted file mode 100644 index a4825e3ac7d40..0000000000000 --- a/build.assets/tooling/cmd/release-notes/template/release-notes.md.tmpl +++ /dev/null @@ -1,26 +0,0 @@ -## Description - -{{ .Description }} - -## Download - -Download the current and previous releases of Teleport at https://goteleport.com/download. - -## Plugins - -Download the current release of Teleport plugins from the links below. 
-* Slack [Linux amd64](https://cdn.teleport.dev/teleport-access-slack-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-slack-v{{ .Version }}-linux-arm64-bin.tar.gz) -* Mattermost [Linux amd64](https://cdn.teleport.dev/teleport-access-mattermost-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-mattermost-v{{ .Version }}-linux-arm64-bin.tar.gz) -* Discord [Linux amd64](https://cdn.teleport.dev/teleport-access-discord-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-discord-v{{ .Version }}-linux-arm64-bin.tar.gz) -* Terraform Provider [Linux amd64](https://cdn.teleport.dev/terraform-provider-teleport-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/terraform-provider-teleport-v{{ .Version }}-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/terraform-provider-teleport-v{{ .Version }}-darwin-amd64-bin.tar.gz) | [macOS arm64](https://cdn.teleport.dev/terraform-provider-teleport-v{{ .Version }}-darwin-arm64-bin.tar.gz) | [macOS universal](https://cdn.teleport.dev/terraform-provider-teleport-v{{ .Version }}-darwin-universal-bin.tar.gz) -* Event Handler [Linux amd64](https://cdn.teleport.dev/teleport-event-handler-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-event-handler-v{{ .Version }}-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/teleport-event-handler-v{{ .Version }}-darwin-amd64-bin.tar.gz) -* PagerDuty [Linux amd64](https://cdn.teleport.dev/teleport-access-pagerduty-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-pagerduty-v{{ .Version }}-linux-arm64-bin.tar.gz) -* Jira [Linux amd64](https://cdn.teleport.dev/teleport-access-jira-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-jira-v{{ .Version }}-linux-arm64-bin.tar.gz) -* Email [Linux amd64](https://cdn.teleport.dev/teleport-access-email-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-email-v{{ .Version }}-linux-arm64-bin.tar.gz) -* Microsoft Teams [Linux amd64](https://cdn.teleport.dev/teleport-access-msteams-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-msteams-v{{ .Version }}-linux-arm64-bin.tar.gz) -{{- if .Labels }} - ---- - -labels: {{ .Labels }} -{{- end }} diff --git a/build.assets/tooling/cmd/release-notes/testdata/expected-release-notes.md b/build.assets/tooling/cmd/release-notes/testdata/expected-release-notes.md deleted file mode 100644 index a8e835ad84ad5..0000000000000 --- a/build.assets/tooling/cmd/release-notes/testdata/expected-release-notes.md +++ /dev/null @@ -1,26 +0,0 @@ -## Description - -* `tctl` now ignores any configuration file if the auth_service section is disabled, and prefer loading credentials from a given identity file or tsh profile instead. [#43115](https://github.com/gravitational/teleport/pull/43115) -* Skip `jamf_service` validation when the service is not enabled. [#43095](https://github.com/gravitational/teleport/pull/43095) -* Fix v16.0.0 amd64 Teleport plugin images using arm64 binaries. [#43084](https://github.com/gravitational/teleport/pull/43084) -* Add ability to edit user traits from the Web UI. 
[#43067](https://github.com/gravitational/teleport/pull/43067) -* Enforce limits when reading events from Firestore for large time windows to prevent OOM events. [#42966](https://github.com/gravitational/teleport/pull/42966) -* Allow all authenticated users to read the cluster `vnet_config`. [#42957](https://github.com/gravitational/teleport/pull/42957) -* Improve search and predicate/label based dialing performance in large clusters under very high load. [#42943](https://github.com/gravitational/teleport/pull/42943) - -## Download - -Download the current and previous releases of Teleport at https://goteleport.com/download. - -## Plugins - -Download the current release of Teleport plugins from the links below. -* Slack [Linux amd64](https://cdn.teleport.dev/teleport-access-slack-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-slack-v16.0.1-linux-arm64-bin.tar.gz) -* Mattermost [Linux amd64](https://cdn.teleport.dev/teleport-access-mattermost-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-mattermost-v16.0.1-linux-arm64-bin.tar.gz) -* Discord [Linux amd64](https://cdn.teleport.dev/teleport-access-discord-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-discord-v16.0.1-linux-arm64-bin.tar.gz) -* Terraform Provider [Linux amd64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-amd64-bin.tar.gz) | [macOS arm64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-arm64-bin.tar.gz) | [macOS universal](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-universal-bin.tar.gz) -* Event Handler [Linux amd64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-darwin-amd64-bin.tar.gz) -* PagerDuty [Linux amd64](https://cdn.teleport.dev/teleport-access-pagerduty-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-pagerduty-v16.0.1-linux-arm64-bin.tar.gz) -* Jira [Linux amd64](https://cdn.teleport.dev/teleport-access-jira-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-jira-v16.0.1-linux-arm64-bin.tar.gz) -* Email [Linux amd64](https://cdn.teleport.dev/teleport-access-email-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-email-v16.0.1-linux-arm64-bin.tar.gz) -* Microsoft Teams [Linux amd64](https://cdn.teleport.dev/teleport-access-msteams-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-msteams-v16.0.1-linux-arm64-bin.tar.gz) diff --git a/build.assets/tooling/cmd/release-notes/testdata/expected-with-labels.md b/build.assets/tooling/cmd/release-notes/testdata/expected-with-labels.md deleted file mode 100644 index 4a91b668129d2..0000000000000 --- a/build.assets/tooling/cmd/release-notes/testdata/expected-with-labels.md +++ /dev/null @@ -1,30 +0,0 @@ -## Description - -* `tctl` now ignores any configuration file if the auth_service section is disabled, and prefer loading credentials from a given identity file or tsh profile instead. 
[#43115](https://github.com/gravitational/teleport/pull/43115) -* Skip `jamf_service` validation when the service is not enabled. [#43095](https://github.com/gravitational/teleport/pull/43095) -* Fix v16.0.0 amd64 Teleport plugin images using arm64 binaries. [#43084](https://github.com/gravitational/teleport/pull/43084) -* Add ability to edit user traits from the Web UI. [#43067](https://github.com/gravitational/teleport/pull/43067) -* Enforce limits when reading events from Firestore for large time windows to prevent OOM events. [#42966](https://github.com/gravitational/teleport/pull/42966) -* Allow all authenticated users to read the cluster `vnet_config`. [#42957](https://github.com/gravitational/teleport/pull/42957) -* Improve search and predicate/label based dialing performance in large clusters under very high load. [#42943](https://github.com/gravitational/teleport/pull/42943) - -## Download - -Download the current and previous releases of Teleport at https://goteleport.com/download. - -## Plugins - -Download the current release of Teleport plugins from the links below. -* Slack [Linux amd64](https://cdn.teleport.dev/teleport-access-slack-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-slack-v16.0.1-linux-arm64-bin.tar.gz) -* Mattermost [Linux amd64](https://cdn.teleport.dev/teleport-access-mattermost-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-mattermost-v16.0.1-linux-arm64-bin.tar.gz) -* Discord [Linux amd64](https://cdn.teleport.dev/teleport-access-discord-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-discord-v16.0.1-linux-arm64-bin.tar.gz) -* Terraform Provider [Linux amd64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-amd64-bin.tar.gz) | [macOS arm64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-arm64-bin.tar.gz) | [macOS universal](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-universal-bin.tar.gz) -* Event Handler [Linux amd64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-darwin-amd64-bin.tar.gz) -* PagerDuty [Linux amd64](https://cdn.teleport.dev/teleport-access-pagerduty-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-pagerduty-v16.0.1-linux-arm64-bin.tar.gz) -* Jira [Linux amd64](https://cdn.teleport.dev/teleport-access-jira-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-jira-v16.0.1-linux-arm64-bin.tar.gz) -* Email [Linux amd64](https://cdn.teleport.dev/teleport-access-email-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-email-v16.0.1-linux-arm64-bin.tar.gz) -* Microsoft Teams [Linux amd64](https://cdn.teleport.dev/teleport-access-msteams-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-msteams-v16.0.1-linux-arm64-bin.tar.gz) - ---- - -labels: security-patch=yes, security-patch-alts=v16.0.0,v16.0.1 diff --git a/build.assets/tooling/cmd/release-notes/testdata/test-changelog.md 
b/build.assets/tooling/cmd/release-notes/testdata/test-changelog.md deleted file mode 100644 index 912a9a1060100..0000000000000 --- a/build.assets/tooling/cmd/release-notes/testdata/test-changelog.md +++ /dev/null @@ -1,23 +0,0 @@ -# Changelog - -## 16.0.1 (06/17/24) - -* `tctl` now ignores any configuration file if the auth_service section is disabled, and prefer loading credentials from a given identity file or tsh profile instead. [#43115](https://github.com/gravitational/teleport/pull/43115) -* Skip `jamf_service` validation when the service is not enabled. [#43095](https://github.com/gravitational/teleport/pull/43095) -* Fix v16.0.0 amd64 Teleport plugin images using arm64 binaries. [#43084](https://github.com/gravitational/teleport/pull/43084) -* Add ability to edit user traits from the Web UI. [#43067](https://github.com/gravitational/teleport/pull/43067) -* Enforce limits when reading events from Firestore for large time windows to prevent OOM events. [#42966](https://github.com/gravitational/teleport/pull/42966) -* Allow all authenticated users to read the cluster `vnet_config`. [#42957](https://github.com/gravitational/teleport/pull/42957) -* Improve search and predicate/label based dialing performance in large clusters under very high load. [#42943](https://github.com/gravitational/teleport/pull/42943) - -## 16.0.0 (06/13/24) - -Teleport 16 brings the following new features and improvements: - -- Teleport VNet -- Device Trust for the Web UI -- Increased support for per-session MFA -- Web UI notification system -- Access requests from the resources view -- `tctl` for Windows -- Teleport plugins improvements diff --git a/docs/config.json b/docs/config.json index f955d894dbc16..0a63fa2737f71 100644 --- a/docs/config.json +++ b/docs/config.json @@ -132,7 +132,7 @@ "aws_secret_access_key": "zyxw9876-this-is-an-example" }, "cloud": { - "version": "16.4.2", + "version": "16.4.3", "major_version": "16", "sla": { "monthly_percentage": "99.9%", diff --git a/docs/pages/admin-guides/infrastructure-as-code/managing-resources/import-existing-resources.mdx b/docs/pages/admin-guides/infrastructure-as-code/managing-resources/import-existing-resources.mdx index d5329defb9feb..4e3a186d6b0c4 100644 --- a/docs/pages/admin-guides/infrastructure-as-code/managing-resources/import-existing-resources.mdx +++ b/docs/pages/admin-guides/infrastructure-as-code/managing-resources/import-existing-resources.mdx @@ -83,5 +83,6 @@ cluster configuration matches your expectations. - Follow [the user and role IaC guide](user-and-role.mdx) to use the Terraform Provider to create Teleport users and grant them roles. - Explore the full list of supported [Terraform provider - resources](../../../reference/terraform-provider.mdx). -- See [the list of supported Teleport Terraform setups](../terraform-provider/terraform-provider.mdx): + resources](../../../reference/terraform-provider/terraform-provider.mdx). 
+- See [the list of supported Teleport Terraform + setups](../terraform-provider/terraform-provider.mdx): diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/dedicated-server.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/dedicated-server.mdx index 3afb79ea87035..683f4f3082bf8 100644 --- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/dedicated-server.mdx +++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/dedicated-server.mdx @@ -159,7 +159,7 @@ $ tctl get role/terraform-test ## Next steps - Explore the - [Terraform provider resource reference](../../../reference/terraform-provider.mdx) + [Terraform provider resource reference](../../../reference/terraform-provider/terraform-provider.mdx) to discover what can be configured with the Teleport Terraform provider. - Read the [tbot configuration reference](../../../reference/machine-id/configuration.mdx) to explore all the available `tbot` configuration options. diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx index 59c49dd2e831f..d6912c912c32e 100644 --- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx +++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx @@ -159,8 +159,10 @@ Do not forget to obtain new temporary credentials every hour by re-running `eval - Follow [the user and role IaC guide](../managing-resources/user-and-role.mdx) to use the Terraform Provider to create Teleport users and grant them roles. - Consult the list of Terraform-supported - resources [in the Terraform reference](../../../reference/terraform-provider.mdx). -- Once you have working Terraform code that configures your Teleport cluster, you might want to run it in the CI or - from a bastion instead of running it locally. To do this, please follow the dedicated guides: + resources [in the Terraform + reference](../../../reference/terraform-provider/terraform-provider.mdx). +- Once you have working Terraform code that configures your Teleport cluster, + you might want to run it in the CI or from a bastion instead of running it + locally. To do this, please follow the dedicated guides: - [Run the Terraform Provider in CI or cloud VMs](./ci-or-cloud.mdx) - [Run the Terraform Provider on a dedicated server](./dedicated-server.mdx) diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/long-lived-credentials.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/long-lived-credentials.mdx index ee1e9e3ed3cc5..9e8d15e9d372b 100644 --- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/long-lived-credentials.mdx +++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/long-lived-credentials.mdx @@ -199,6 +199,9 @@ To apply the configuration: ## Next steps -- Explore the full list of supported [Terraform provider resources](../../../reference/terraform-provider.mdx). -- Learn [how to manage users and roles with IaC](../managing-resources/user-and-role.mdx) -- Read more about [impersonation](../../access-controls/guides/impersonation.mdx). +- Explore the full list of supported [Terraform provider + resources](../../../reference/terraform-provider/terraform-provider.mdx). 
+- Learn [how to manage users and roles with + IaC](../managing-resources/user-and-role.mdx) +- Read more about + [impersonation](../../access-controls/guides/impersonation.mdx). diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/spacelift.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/spacelift.mdx index 976b95f8b2306..249a954eee4a3 100644 --- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/spacelift.mdx +++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/spacelift.mdx @@ -260,8 +260,8 @@ $ tctl get users/terraform-test - Now that you know how to manage Teleport configuration resources with Terraform and Spacelift, read the [Terraform resource - reference](../../../reference/terraform-provider.mdx) so you can flesh out your - configuration. + reference](../../../reference/terraform-provider/terraform-provider.mdx) so + you can flesh out your configuration. - To find out more about Spacelift's OIDC implementation, which Machine ID uses to authenticate to your Teleport cluster, read [the Spacelift documentation](https://docs.spacelift.io/integrations/cloud-providers/oidc/). diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx index e19b9a49b0fc9..5a7a41505ae19 100644 --- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx +++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx @@ -28,8 +28,8 @@ is executed. You must pick the correct guide for your setup: Once you have a functional Teleport Terraform provider, you will want to configure your resources with it. -You can find the list of supported resources and their fields is -available [in the Terraform reference](../../../reference/terraform-provider.mdx). +The list of supported resources and their fields is available [in the Terraform +reference](../../../reference/terraform-provider/terraform-provider.mdx). Some resources have their dedicated Infrastructure-as-Code (IaC) step-by step guides such as: - [Managing Users And Roles With IaC](../managing-resources/user-and-role.mdx) diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/enroll-resources.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/enroll-resources.mdx index d4de6522c848a..5a2f34e326db2 100644 --- a/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/enroll-resources.mdx +++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/enroll-resources.mdx @@ -623,7 +623,7 @@ edit your Terraform module to: 1. **Change the userdata script** to enable additional Agent services additional infrastructure resources for your Agents to proxy. 1. **Deploy dynamic resources:** Consult the [Terraform provider - reference](../../../reference/terraform-provider.mdx) for Terraform resources - that you can apply in order to enroll dynamic resources in your - infrastructure. + reference](../../../reference/terraform-provider/terraform-provider.mdx) for + Terraform resources that you can apply in order to enroll dynamic resources + in your infrastructure. 
diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/rbac.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/rbac.mdx index 3699876228ef1..865192382bc8a 100644 --- a/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/rbac.mdx +++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/rbac.mdx
@@ -563,4 +563,4 @@ troubleshoot the single sign-on provider.
 
 Now that you have configured RBAC in your Terraform demo cluster, fine-tune
 your setup by reading the comprehensive [Terraform provider
-reference](../../../reference/terraform-provider.mdx).
+reference](../../../reference/terraform-provider/terraform-provider.mdx).
diff --git a/docs/pages/reference/signature-algorithms.mdx b/docs/pages/reference/signature-algorithms.mdx
new file mode 100644
index 0000000000000..ff3aed3eb85fc
--- /dev/null
+++ b/docs/pages/reference/signature-algorithms.mdx
@@ -0,0 +1,217 @@
+---
+title: Signature Algorithms
+h1: Signature Algorithms Reference
+description: "Signature algorithms used in Teleport."
+---
+
+The Teleport Auth Service issues SSH and TLS certificates to users and hosts
+that allow all connections to be authenticated, authorized, and encrypted.
+This page describes the cryptographic signature algorithms used to sign each
+kind of certificate issued by Teleport.
+
+Continue reading to learn how to:
+
+- configure a Teleport cluster created before Teleport 17 to use fast and secure
+  elliptic-curve keys
+- configure your cluster to use FIPS-compatible signature algorithms
+- configure your cluster to use signature algorithms compatible with your HSM or KMS
+
+## Signature algorithm suites
+
+New Teleport clusters created after Teleport 17 will automatically use
+elliptic-curve keys in most cases.
+If you created your cluster on an older version of Teleport, it will continue to
+use RSA keys until you opt in to the new algorithms by configuring a **signature
+algorithm suite**.
+By selecting a single algorithm suite, you can control all of the cryptographic
+signature algorithms used across your cluster.
+
+### `legacy`
+
+The `legacy` suite identifies the original Teleport behavior, where all
+signatures are based on 2048-bit RSA keys.
+Teleport clusters created on versions prior to 17.0.0 have effectively always
+used the `legacy` suite, and this will not automatically change when they
+upgrade to newer versions.
+
+We recommend that users upgrade to one of the newer suites when they are able.
+
+### `balanced-v1`
+
+The `balanced-v1` suite is the default suite for self-hosted clusters created
+after version 17.0.0.
+It is our recommended suite for most self-hosted users.
+When this suite is configured, Ed25519 is used for all SSH certificates and
+ECDSA with the NIST P-256 curve is used for all TLS certificates.
+
+RSA is still used where common third-party software that Teleport integrates
+with is known to be unable to support non-RSA algorithms.
+This includes certificates issued by the `db` or `db_client` CAs and certain
+JSON Web Tokens (JWTs) that are issued by Teleport.
+
+### `fips-v1`
+
+We recommend the `fips-v1` suite for users deploying Teleport in FIPS
+mode.
+New clusters created after version 17.0.0 in FIPS mode will use this suite by
+default.
+
+FIPS 186-5 only added approval for Ed25519 relatively recently (in February 2023),
+and there is some nuance to how the algorithm can be used.
+More importantly for Teleport, the `boringcrypto` module our FIPS Go binaries are
+compiled with does not yet support Ed25519.
+For these reasons, the `fips-v1` suite is based on the `balanced-v1` suite but
+replaces all uses of Ed25519 with ECDSA.
+
+### `hsm-v1`
+
+The `hsm-v1` suite is designed for Cloud customers and self-hosted users who
+have opted in to keeping their Certificate Authority key material in an HSM or KMS.
+It is the default suite for new clusters created after version 17.0.0 that have
+an HSM or KMS configured.
+It will be the default suite for new Teleport Cloud clusters on version 17.x+.
+
+Teleport's integration with PKCS#11 HSMs and cloud KMSs does not yet support
+Ed25519.
+For this reason, the `hsm-v1` suite is based on the `balanced-v1` suite but uses
+ECDSA in place of Ed25519 for all Certificate Authority keys.
+User and host SSH keys still use Ed25519.
+
+## Configuration
+
+The cluster signature algorithm suite can be configured statically in the Auth Service
+configuration file or dynamically in the `cluster_auth_preference` resource.
+
+### Static configuration
+
+Add the following to your Teleport Auth Service configuration file, which is stored in
+`/etc/teleport.yaml` by default.
+
+  ```yaml
+  auth_service:
+    authentication:
+      signature_algorithm_suite: "balanced-v1"
+  ```
+
+### Dynamic resource
+
+Edit your `cluster_auth_preference` resource:
+
+```code
+$ tctl edit cap
+```
+
+Ensure that the resource includes the following content:
+
+```yaml
+kind: cluster_auth_preference
+metadata:
+  name: cluster-auth-preference
+spec:
+  signature_algorithm_suite: "balanced-v1"
+version: v2
+```
+
+## Certificate Authorities
+
+The `tctl status` command will display the status of each of your Teleport
+cluster's Certificate Authorities, including the algorithm used for each key pair.
+
+```code
+$ tctl status
+Cluster: example.teleport.sh
+Version: 17.0.0
+CA pins: sha256:b1419d94442b2b1ba70f967157bf177c7605020c59ee93a10b0e4d3fc526e7df
+
+authority rotation                protocol status algorithm   storage
+--------- ----------------------- -------- ------ ----------- --------
+host      standby (never rotated) SSH      active Ed25519     software
+                                  TLS      active ECDSA P-256 software
+user      standby (never rotated) SSH      active Ed25519     software
+                                  TLS      active ECDSA P-256 software
+db        standby (never rotated) TLS      active RSA 2048    software
+db_client standby (never rotated) TLS      active RSA 2048    software
+openssh   standby (never rotated) SSH      active Ed25519     software
+jwt       standby (never rotated) JWT      active ECDSA P-256 software
+saml_idp  standby (never rotated) TLS      active RSA 2048    software
+oidc_idp  standby (never rotated) JWT      active RSA 2048    software
+spiffe    standby (never rotated) JWT      active RSA 2048    software
+                                  TLS      active ECDSA P-256 software
+okta      standby (never rotated) JWT      active ECDSA P-256 software
+```
+
+Each certificate authority is automatically generated the first time your Auth
+Service starts up when you create a new Teleport cluster.
+If you change your cluster's signature algorithm suite after the cluster has
+already been created, new user and host keys will use the new algorithms, but
+the key material of each Certificate Authority will not automatically be
+updated.
+
+In order to use new signature algorithms for your existing Certificate
+Authorities, you will need to complete a CA rotation for each authority.
+This may require manual steps to update the trust relationships in your cluster.
+The procedure is documented in the [CA rotation guide](../admin-guides/management/operations/ca-rotation.mdx).
+This process is optional; your cluster will continue to function with the
+existing Certificate Authority keys if you don't complete a CA rotation.
+
+## Algorithms
+
+The following table lists the key algorithm used for each key Teleport generates
+in each suite.
+
+| key purpose            | `legacy`    | `balanced-v1` | `fips-v1`   | `hsm-v1`    |
+|------------------------|-------------|---------------|-------------|-------------|
+| User CA (SSH)          | RSA 2048    | Ed25519       | ECDSA P-256 | ECDSA P-256 |
+| User CA (TLS)          | RSA 2048    | ECDSA P-256   | ECDSA P-256 | ECDSA P-256 |
+| Host CA (SSH)          | RSA 2048    | Ed25519       | ECDSA P-256 | ECDSA P-256 |
+| Host CA (TLS)          | RSA 2048    | ECDSA P-256   | ECDSA P-256 | ECDSA P-256 |
+| Database CA            | RSA 2048    | RSA 2048      | RSA 2048    | RSA 2048    |
+| Database Client CA     | RSA 2048    | RSA 2048      | RSA 2048    | RSA 2048    |
+| OpenSSH CA             | RSA 2048    | Ed25519       | ECDSA P-256 | ECDSA P-256 |
+| JWT CA                 | RSA 2048    | ECDSA P-256   | ECDSA P-256 | ECDSA P-256 |
+| OIDC IdP CA            | RSA 2048    | RSA 2048      | RSA 2048    | RSA 2048    |
+| SAML IdP CA            | RSA 2048    | RSA 2048      | RSA 2048    | RSA 2048    |
+| SPIFFE CA (TLS)        | RSA 2048    | ECDSA P-256   | ECDSA P-256 | ECDSA P-256 |
+| SPIFFE CA (JWT)        | RSA 2048    | RSA 2048      | RSA 2048    | RSA 2048    |
+| Okta CA                | ECDSA P-256 | ECDSA P-256   | ECDSA P-256 | ECDSA P-256 |
+| User SSH               | RSA 2048    | Ed25519       | ECDSA P-256 | Ed25519     |
+| User TLS               | RSA 2048    | ECDSA P-256   | ECDSA P-256 | ECDSA P-256 |
+| Database Client        | RSA 2048    | RSA 2048      | RSA 2048    | RSA 2048    |
+| Database Server        | RSA 2048    | RSA 2048      | RSA 2048    | RSA 2048    |
+| Host SSH               | RSA 2048    | Ed25519       | ECDSA P-256 | Ed25519     |
+| Host Identity          | RSA 2048    | ECDSA P-256   | ECDSA P-256 | ECDSA P-256 |
+| MachineID Identity     | RSA 2048    | ECDSA P-256   | ECDSA P-256 | ECDSA P-256 |
+| Workload ID SVID       | RSA 2048    | ECDSA P-256   | ECDSA P-256 | ECDSA P-256 |
+| EC2 Instance Connect   | Ed25519     | Ed25519       | ECDSA P-256 | Ed25519     |
+| Windows Desktop Client | RSA 2048    | RSA 2048      | RSA 2048    | RSA 2048    |
+
+## FAQ
+
+### What if my use case doesn't support the new algorithms?
+
+Try it and let us know!
+We aim to balance security, performance, and compatibility with the chosen
+signature algorithm suites.
+It is okay to continue using the `legacy` suite for the foreseeable future, and
+we expect it may be required in some users' environments.
+
+### How did you choose these algorithms?
+
+Ed25519 is a modern, fast, secure algorithm with small keys that has become the
+de facto standard for new SSH keys.
+It is our preference in cases where it is compatible with everything Teleport
+needs to interact with.
+
+ECDSA with the NIST P-256 curve is widely used and supported for TLS, and we use
+it in cases where there is not good support for Ed25519.
+It has similar speed and security properties to Ed25519.
+
+We only continue to use RSA where we interact with third-party software that
+does not support Ed25519 or ECDSA.
+
+### Why can't I pick a specific algorithm for a specific Teleport cert?
+
+The signature algorithm suites are designed to simplify the configuration burden.
+We did not want to expose 100 configuration knobs to modify every single
+signature Teleport produces, which could lead to thousands of possible combinations
+we'd have to support, and could create the possibility for insecure combinations.
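To make the suite descriptions in the new reference page concrete, here is a small, standalone Go sketch (not part of this change set) that generates the two key types the `balanced-v1` suite relies on: Ed25519 for SSH and ECDSA with the NIST P-256 curve for TLS. The program layout and printed labels are illustrative assumptions, not Teleport's key-generation code.

```go
// Illustrates the key types named by the balanced-v1 suite: Ed25519 for SSH
// certificates and ECDSA P-256 for TLS certificates.
package main

import (
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Under balanced-v1, SSH certificates use Ed25519 keys.
	edPub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	sshPub, err := ssh.NewPublicKey(edPub)
	if err != nil {
		panic(err)
	}
	fmt.Println("SSH key type:", sshPub.Type()) // ssh-ed25519

	// Under balanced-v1, TLS certificates use ECDSA with the NIST P-256 curve.
	ecKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	fmt.Println("TLS key curve:", ecKey.Curve.Params().Name) // P-256
}
```

Running it prints `ssh-ed25519` and `P-256`, matching the algorithm column in the `tctl status` output shown above.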
diff --git a/docs/pages/reference/terraform-provider/data-sources.mdx b/docs/pages/reference/terraform-provider/data-sources.mdx deleted file mode 100644 index 6c7f82c16279a..0000000000000 --- a/docs/pages/reference/terraform-provider/data-sources.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Terraform data-sources index" -description: "Index of all the data-sources supported by the Teleport Terraform Provider" ---- - -{/*Auto-generated file. Do not edit.*/} -{/*To regenerate, navigate to integrations/terraform and run `make docs`.*/} - -{/* - This file will be renamed data-sources.mdx during build time. - The template name is reserved by tfplugindocs so we suffix with -index. -*/} - -The Teleport Terraform provider supports the following data-sources: - - - [`teleport_access_list`](./data-sources/access_list.mdx) - - [`teleport_access_monitoring_rule`](./data-sources/access_monitoring_rule.mdx) - - [`teleport_app`](./data-sources/app.mdx) - - [`teleport_auth_preference`](./data-sources/auth_preference.mdx) - - [`teleport_cluster_maintenance_config`](./data-sources/cluster_maintenance_config.mdx) - - [`teleport_cluster_networking_config`](./data-sources/cluster_networking_config.mdx) - - [`teleport_database`](./data-sources/database.mdx) - - [`teleport_github_connector`](./data-sources/github_connector.mdx) - - [`teleport_installer`](./data-sources/installer.mdx) - - [`teleport_login_rule`](./data-sources/login_rule.mdx) - - [`teleport_oidc_connector`](./data-sources/oidc_connector.mdx) - - [`teleport_okta_import_rule`](./data-sources/okta_import_rule.mdx) - - [`teleport_provision_token`](./data-sources/provision_token.mdx) - - [`teleport_role`](./data-sources/role.mdx) - - [`teleport_saml_connector`](./data-sources/saml_connector.mdx) - - [`teleport_session_recording_config`](./data-sources/session_recording_config.mdx) - - [`teleport_static_host_user`](./data-sources/static_host_user.mdx) - - [`teleport_trusted_cluster`](./data-sources/trusted_cluster.mdx) - - [`teleport_trusted_device`](./data-sources/trusted_device.mdx) - - [`teleport_user`](./data-sources/user.mdx) diff --git a/docs/pages/reference/terraform-provider/data-sources/data-sources.mdx b/docs/pages/reference/terraform-provider/data-sources/data-sources.mdx new file mode 100644 index 0000000000000..047a8a04a630b --- /dev/null +++ b/docs/pages/reference/terraform-provider/data-sources/data-sources.mdx @@ -0,0 +1,35 @@ +--- +title: "Terraform data-sources index" +description: "Index of all the data-sources supported by the Teleport Terraform Provider" +--- + +{/*Auto-generated file. Do not edit.*/} +{/*To regenerate, navigate to integrations/terraform and run `make docs`.*/} + +{/* + This file will be renamed data-sources.mdx during build time. + The template name is reserved by tfplugindocs so we suffix with -index. 
+*/} + +The Teleport Terraform provider supports the following data-sources: + + - [`teleport_access_list`](./access_list.mdx) + - [`teleport_access_monitoring_rule`](./access_monitoring_rule.mdx) + - [`teleport_app`](./app.mdx) + - [`teleport_auth_preference`](./auth_preference.mdx) + - [`teleport_cluster_maintenance_config`](./cluster_maintenance_config.mdx) + - [`teleport_cluster_networking_config`](./cluster_networking_config.mdx) + - [`teleport_database`](./database.mdx) + - [`teleport_github_connector`](./github_connector.mdx) + - [`teleport_installer`](./installer.mdx) + - [`teleport_login_rule`](./login_rule.mdx) + - [`teleport_oidc_connector`](./oidc_connector.mdx) + - [`teleport_okta_import_rule`](./okta_import_rule.mdx) + - [`teleport_provision_token`](./provision_token.mdx) + - [`teleport_role`](./role.mdx) + - [`teleport_saml_connector`](./saml_connector.mdx) + - [`teleport_session_recording_config`](./session_recording_config.mdx) + - [`teleport_static_host_user`](./static_host_user.mdx) + - [`teleport_trusted_cluster`](./trusted_cluster.mdx) + - [`teleport_trusted_device`](./trusted_device.mdx) + - [`teleport_user`](./user.mdx) diff --git a/docs/pages/reference/terraform-provider/resources.mdx b/docs/pages/reference/terraform-provider/resources.mdx deleted file mode 100644 index dd2640e926d22..0000000000000 --- a/docs/pages/reference/terraform-provider/resources.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Terraform resources index" -description: "Index of all the datasources supported by the Teleport Terraform Provider" ---- - -{/*Auto-generated file. Do not edit.*/} -{/*To regenerate, navigate to integrations/terraform and run `make docs`.*/} - -{/* - This file will be renamed data-sources.mdx during build time. - The template name is reserved by tfplugindocs so we suffix with -index. 
-*/} - -The Teleport Terraform provider supports the following resources: - - - [`teleport_access_list`](./resources/access_list.mdx) - - [`teleport_access_monitoring_rule`](./resources/access_monitoring_rule.mdx) - - [`teleport_app`](./resources/app.mdx) - - [`teleport_auth_preference`](./resources/auth_preference.mdx) - - [`teleport_bot`](./resources/bot.mdx) - - [`teleport_cluster_maintenance_config`](./resources/cluster_maintenance_config.mdx) - - [`teleport_cluster_networking_config`](./resources/cluster_networking_config.mdx) - - [`teleport_database`](./resources/database.mdx) - - [`teleport_github_connector`](./resources/github_connector.mdx) - - [`teleport_installer`](./resources/installer.mdx) - - [`teleport_login_rule`](./resources/login_rule.mdx) - - [`teleport_oidc_connector`](./resources/oidc_connector.mdx) - - [`teleport_okta_import_rule`](./resources/okta_import_rule.mdx) - - [`teleport_provision_token`](./resources/provision_token.mdx) - - [`teleport_role`](./resources/role.mdx) - - [`teleport_saml_connector`](./resources/saml_connector.mdx) - - [`teleport_server`](./resources/server.mdx) - - [`teleport_session_recording_config`](./resources/session_recording_config.mdx) - - [`teleport_static_host_user`](./resources/static_host_user.mdx) - - [`teleport_trusted_cluster`](./resources/trusted_cluster.mdx) - - [`teleport_trusted_device`](./resources/trusted_device.mdx) - - [`teleport_user`](./resources/user.mdx) diff --git a/docs/pages/reference/terraform-provider/resources/resources.mdx b/docs/pages/reference/terraform-provider/resources/resources.mdx new file mode 100644 index 0000000000000..ac150d8a43048 --- /dev/null +++ b/docs/pages/reference/terraform-provider/resources/resources.mdx @@ -0,0 +1,37 @@ +--- +title: "Terraform resources index" +description: "Index of all the datasources supported by the Teleport Terraform Provider" +--- + +{/*Auto-generated file. Do not edit.*/} +{/*To regenerate, navigate to integrations/terraform and run `make docs`.*/} + +{/* + This file will be renamed data-sources.mdx during build time. + The template name is reserved by tfplugindocs so we suffix with -index. 
+*/} + +The Teleport Terraform provider supports the following resources: + + - [`teleport_access_list`](./access_list.mdx) + - [`teleport_access_monitoring_rule`](./access_monitoring_rule.mdx) + - [`teleport_app`](./app.mdx) + - [`teleport_auth_preference`](./auth_preference.mdx) + - [`teleport_bot`](./bot.mdx) + - [`teleport_cluster_maintenance_config`](./cluster_maintenance_config.mdx) + - [`teleport_cluster_networking_config`](./cluster_networking_config.mdx) + - [`teleport_database`](./database.mdx) + - [`teleport_github_connector`](./github_connector.mdx) + - [`teleport_installer`](./installer.mdx) + - [`teleport_login_rule`](./login_rule.mdx) + - [`teleport_oidc_connector`](./oidc_connector.mdx) + - [`teleport_okta_import_rule`](./okta_import_rule.mdx) + - [`teleport_provision_token`](./provision_token.mdx) + - [`teleport_role`](./role.mdx) + - [`teleport_saml_connector`](./saml_connector.mdx) + - [`teleport_server`](./server.mdx) + - [`teleport_session_recording_config`](./session_recording_config.mdx) + - [`teleport_static_host_user`](./static_host_user.mdx) + - [`teleport_trusted_cluster`](./trusted_cluster.mdx) + - [`teleport_trusted_device`](./trusted_device.mdx) + - [`teleport_user`](./user.mdx) diff --git a/docs/pages/reference/terraform-provider.mdx b/docs/pages/reference/terraform-provider/terraform-provider.mdx similarity index 84% rename from docs/pages/reference/terraform-provider.mdx rename to docs/pages/reference/terraform-provider/terraform-provider.mdx index 0c959e49ff397..d1a84f5b694aa 100644 --- a/docs/pages/reference/terraform-provider.mdx +++ b/docs/pages/reference/terraform-provider/terraform-provider.mdx @@ -14,10 +14,10 @@ It lists all the supported resources and their fields. To get started with the Terraform provider, you must start with [the installation -guide](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx). +guide](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx). Once you got a working provider, we recommend you to follow the ["Managing users and roles with IaC"]( -../admin-guides/infrastructure-as-code/managing-resources/user-and-role.mdx) guide. +../../admin-guides/infrastructure-as-code/managing-resources/user-and-role.mdx) guide. The provider exposes Teleport resources both as Terraform data-sources and Terraform resources. @@ -27,8 +27,8 @@ to create resources in Teleport. {/* Note: the awkward `resource-index` file names are here because `data-sources` is reserved by the generator for the catch-all resource template */} -- [list of supported resources](./terraform-provider/resources.mdx) -- [list of supported data-sources](./terraform-provider/data-sources.mdx) +- [list of supported resources](./resources/resources.mdx) +- [list of supported data-sources](./data-sources/data-sources.mdx) ## Example Usage @@ -81,7 +81,7 @@ provider "teleport" { This section lists the different ways of passing credentials to the Terraform provider. 
You can find which method fits your use case in the [Teleport Terraform provider setup -page](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) +page](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) ### With an identity file @@ -108,16 +108,16 @@ Detected security key tap ``` You can find more information in -the ["Run the Terraform provider locally" guide](../admin-guides/infrastructure-as-code/terraform-provider/local.mdx) +the ["Run the Terraform provider locally" guide](../../admin-guides/infrastructure-as-code/terraform-provider/local.mdx) #### Obtaining an identity file via `tbot` -`tbot` relies on [MachineID](../enroll-resources/machine-id/introduction.mdx) to obtain and automatically renew +`tbot` relies on [MachineID](../../enroll-resources/machine-id/introduction.mdx) to obtain and automatically renew short-lived credentials. Such credentials are harder to exfiltrate, and you can control more precisely who has access to which roles (e.g. you can allow only GitHub Actions pipelines targeting the `prod` environment to get certificates). You can follow [the Terraform Provider -guide](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) to setup `tbot` +guide](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) to setup `tbot` and have Terraform use its identity. #### Obtaining an identity file via `tctl auth sign` @@ -132,7 +132,7 @@ This auth method has the following limitations: - Such credentials are high-privileged and long-lived. They must be protected and rotated. - This auth method does not work against Teleport clusters with MFA set to `webauthn`. On such clusters, Teleport will reject any long-lived certificate and require - [an additional MFA challenge for administrative actions](../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx). + [an additional MFA challenge for administrative actions](../../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx). ### With a token (native MachineID) @@ -140,11 +140,11 @@ Starting with 16.2, the Teleport Terraform provider can natively use MachineID ( cluster. The Terraform Provider will rely on its runtime (AWS, GCP, Kubernetes, CI/CD system) to prove its identity to Teleport. -You can use any [delegated join method](./join-methods.mdx#delegated-join-methods) by setting +You can use any [delegated join method](../join-methods.mdx#delegated-join-methods) by setting both `join_method` and `join_token` in the provider configuration. This setup is described in more details in -the ["Run the Teleport Terraform provider in CI or Cloud" guide](../admin-guides/infrastructure-as-code/terraform-provider/ci-or-cloud.mdx). +the ["Run the Teleport Terraform provider in CI or Cloud" guide](../../admin-guides/infrastructure-as-code/terraform-provider/ci-or-cloud.mdx). ### With key, certificate, and CA certificate @@ -160,7 +160,7 @@ This auth method has the following limitations: - Such credentials are high-privileged and long-lived. They must be protected and rotated. - This auth method does not work against Teleport clusters with MFA set to `webauthn`. On such clusters, Teleport will reject any long-lived certificate and require - [an additional MFA challenge for administrative actions](../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx). + [an additional MFA challenge for administrative actions](../../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx). 
 {/* schema generated by tfplugindocs */}
 ## Schema
@@ -175,8 +175,8 @@ This auth method has the following limitations:
 - `identity_file` (String, Sensitive) Teleport identity file content. This can also be set with the environment variable `TF_TELEPORT_IDENTITY_FILE`.
 - `identity_file_base64` (String, Sensitive) Teleport identity file content base64 encoded. This can also be set with the environment variable `TF_TELEPORT_IDENTITY_FILE_BASE64`.
 - `identity_file_path` (String) Teleport identity file path. This can also be set with the environment variable `TF_TELEPORT_IDENTITY_FILE_PATH`.
-- `join_method` (String) Enables the native Terraform MachineID support. When set, Terraform uses MachineID to securely join the Teleport cluster and obtain credentials. See [the join method reference](./join-methods.mdx) for possible values, you must use [a delegated join method](./join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `TF_TELEPORT_JOIN_METHOD`.
-- `join_token` (String) Name of the token used for the native MachineID joining. This value is not sensitive for [delegated join methods](./join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `TF_TELEPORT_JOIN_TOKEN`.
+- `join_method` (String) Enables the native Terraform MachineID support. When set, Terraform uses MachineID to securely join the Teleport cluster and obtain credentials. See [the join method reference](../join-methods.mdx) for possible values. You must use [a delegated join method](../join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `TF_TELEPORT_JOIN_METHOD`.
+- `join_token` (String) Name of the token used for the native MachineID joining. This value is not sensitive for [delegated join methods](../join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `TF_TELEPORT_JOIN_TOKEN`.
 - `key_base64` (String, Sensitive) Base64 encoded TLS auth key. This can also be set with the environment variable `TF_TELEPORT_KEY_BASE64`.
 - `key_path` (String) Path to Teleport auth key file. This can also be set with the environment variable `TF_TELEPORT_KEY`.
 - `profile_dir` (String) Teleport profile path. This can also be set with the environment variable `TF_TELEPORT_PROFILE_PATH`.
diff --git a/docs/vale-styles/structure/intro-paragraph.yml b/docs/vale-styles/structure/intro-paragraph.yml
new file mode 100644
index 0000000000000..3bee6806e4d66
--- /dev/null
+++ b/docs/vale-styles/structure/intro-paragraph.yml
@@ -0,0 +1,34 @@
+# This style enforces the presence of an introductory paragraph before the first
+# H2 of a docs page.
+extends: script
+level: error
+message: There must be a brief intro paragraph before the first H2-level section of a docs page. Use this to describe the purpose of the guide so a reader can determine whether they should continue reading. If the guide introduces a feature, describe the purpose and benefits of the feature. If there is already an "Introduction" H2 or similar, remove the heading.
+scope: raw
+script: |
+  text := import("text")
+  getMatches := func() {
+    docSeparators := text.re_find(`\n?---\n`, scope, 2)
+    // This is probably not a valid MDX file, but let other linters handle the
+    // error.
+ if docSeparators == undefined || len(docSeparators) != 2 { + return [] + } + + // Get the first H2 section + firstH2 := text.re_find(`\n## \w`, scope, 1) + if firstH2 == undefined { + return [] + } + + initialText := text.substr(scope, docSeparators[1][0].end,firstH2[0][0].begin) + // Check for at least one non-empty line before the first H2. + if !text.re_match(`\n[^\n]+\n`, initialText) { + return [{ + begin: docSeparators[1][0].end, + end: firstH2[0][0].begin + }] + } + + } + + matches := getMatches() diff --git a/e b/e index c8b2aed1f1c9d..b23222d7c1c5a 160000 --- a/e +++ b/e @@ -1 +1 @@ -Subproject commit c8b2aed1f1c9d059e8853163486214778dcb08b0 +Subproject commit b23222d7c1c5a747f41a95fb98d15e0073f7cd99 diff --git a/examples/aws/terraform/ha-autoscale-cluster/proxy-user-data.tpl b/examples/aws/terraform/ha-autoscale-cluster/proxy-user-data.tpl index 3b348c8c4a237..97071d35b014d 100644 --- a/examples/aws/terraform/ha-autoscale-cluster/proxy-user-data.tpl +++ b/examples/aws/terraform/ha-autoscale-cluster/proxy-user-data.tpl @@ -14,3 +14,8 @@ TELEPORT_ENABLE_POSTGRES=${enable_postgres_listener} USE_ACM=${use_acm} USE_TLS_ROUTING=${use_tls_routing} EOF +cat >>/etc/default/teleport < /path/to/tmp/file' into the session (dumping the value of DISPLAY into the temp file) _, err = keyboard.Write([]byte(fmt.Sprintf("printenv %v > %s\n\r", x11.DisplayEnv, tmpFile.Name()))) @@ -4749,7 +4749,7 @@ func testX11Forwarding(t *testing.T, suite *integrationTestSuite) { assert.Eventually(t, func() bool { output, err := os.ReadFile(tmpFile.Name()) if err == nil && len(output) != 0 { - display = strings.TrimSpace(string(output)) + display <- strings.TrimSpace(string(output)) return true } return false @@ -4757,7 +4757,7 @@ func testX11Forwarding(t *testing.T, suite *integrationTestSuite) { }, 10*time.Second, time.Second) // Make a new connection to the XServer proxy to confirm that forwarding is working. 
- serverDisplay, err := x11.ParseDisplay(display) + serverDisplay, err := x11.ParseDisplay(<-display) require.NoError(t, err) conn, err := serverDisplay.Dial() diff --git a/integrations/access/email/app.go b/integrations/access/email/app.go index ba33bccc6887f..8e38af504a56a 100644 --- a/integrations/access/email/app.go +++ b/integrations/access/email/app.go @@ -122,6 +122,7 @@ func (a *App) run(ctx context.Context) error { // init inits plugin func (a *App) init(ctx context.Context) error { + log := logger.Get(ctx) ctx, cancel := context.WithTimeout(ctx, initTimeout) defer cancel() @@ -145,6 +146,11 @@ func (a *App) init(ctx context.Context) error { return trace.Wrap(err) } + log.Debug("Starting client connection health check...") + if err = a.client.CheckHealth(ctx); err != nil { + return trace.Wrap(err, "client connection health check failed") + } + log.Debug("Client connection health check finished ok") return nil } diff --git a/integrations/access/email/client.go b/integrations/access/email/client.go index ce1b8cad48400..b65516962d8c4 100644 --- a/integrations/access/email/client.go +++ b/integrations/access/email/client.go @@ -59,12 +59,12 @@ func NewClient(ctx context.Context, conf Config, clusterName, webProxyAddr strin } if conf.Mailgun != nil { - mailer = NewMailgunMailer(*conf.Mailgun, conf.Delivery.Sender, clusterName) + mailer = NewMailgunMailer(*conf.Mailgun, conf.StatusSink, conf.Delivery.Sender, clusterName, conf.RoleToRecipients[types.Wildcard]) logger.Get(ctx).WithField("domain", conf.Mailgun.Domain).Info("Using Mailgun as email transport") } if conf.SMTP != nil { - mailer = NewSMTPMailer(*conf.SMTP, conf.Delivery.Sender, clusterName) + mailer = NewSMTPMailer(*conf.SMTP, conf.StatusSink, conf.Delivery.Sender, clusterName) logger.Get(ctx).WithFields(logger.Fields{ "host": conf.SMTP.Host, "port": conf.SMTP.Port, @@ -79,6 +79,11 @@ func NewClient(ctx context.Context, conf Config, clusterName, webProxyAddr strin }, nil } +// CheckHealth checks if the Email client connection is healthy. +func (c *Client) CheckHealth(ctx context.Context) error { + return trace.Wrap(c.mailer.CheckHealth(ctx)) +} + // SendNewThreads sends emails on new requests. Returns EmailData. func (c *Client) SendNewThreads(ctx context.Context, recipients []string, reqID string, reqData RequestData) ([]EmailThread, error) { var threads []EmailThread diff --git a/integrations/access/email/config.go b/integrations/access/email/config.go index 7fb4dd454efbe..f9cfe69ebc952 100644 --- a/integrations/access/email/config.go +++ b/integrations/access/email/config.go @@ -68,9 +68,13 @@ type Config struct { Log logger.Config `toml:"log"` // Teleport is a handle to the client to use when communicating with - // the Teleport auth server. The Datadog app will create a gRPC-based + // the Teleport auth server. The Email app will create a gRPC-based // client on startup if this is not set. Client teleport.Client + + // StatusSink receives any status updates from the plugin for + // further processing. Status updates will be ignored if not set. + StatusSink common.StatusSink } // LoadConfig reads the config file, initializes a new Config struct object, and returns it. 
diff --git a/integrations/access/email/mailers.go b/integrations/access/email/mailers.go index 5f690bb70256a..17864322fbc4b 100644 --- a/integrations/access/email/mailers.go +++ b/integrations/access/email/mailers.go @@ -21,6 +21,7 @@ import ( "crypto/rand" "encoding/binary" "fmt" + "net/http" "os" "strconv" "strings" @@ -29,11 +30,25 @@ import ( "github.com/gravitational/trace" "github.com/mailgun/mailgun-go/v4" "gopkg.in/mail.v2" + + "github.com/gravitational/teleport/integrations/access/common" + "github.com/gravitational/teleport/integrations/lib/logger" +) + +const ( + // statusEmitTimeout specifies the max timeout to emit status. + statusEmitTimeout = 10 * time.Second + // mailgunHTTPTimeout specifies the max timeout for mailgun api send request. + mailgunHTTPTimeout = 10 * time.Second + // smtpDialerTimeout specifies the max timeout for smtp dialer read/write operations. + smtpDialerTimeout = 10 * time.Second ) // Mailer is an interface to mail sender type Mailer interface { Send(ctx context.Context, id, recipient, body, references string) (string, error) + // CheckHealth checks if the Email client connection is healthy. + CheckHealth(ctx context.Context) error } // SMTPMailer implements SMTP mailer @@ -41,6 +56,7 @@ type SMTPMailer struct { dialer *mail.Dialer sender string clusterName string + sink common.StatusSink } // MailgunMailer implements mailgun mailer @@ -48,23 +64,59 @@ type MailgunMailer struct { mailgun *mailgun.MailgunImpl sender string clusterName string + + // fallbackRecipients specifies the list of default recipients. + // This is only used for initial health check. + fallbackRecipients []string } // NewSMTPMailer inits new SMTP mailer -func NewSMTPMailer(c SMTPConfig, sender, clusterName string) Mailer { +func NewSMTPMailer(c SMTPConfig, sink common.StatusSink, sender, clusterName string) Mailer { dialer := mail.NewDialer(c.Host, c.Port, c.Username, c.Password) dialer.StartTLSPolicy = c.MailStartTLSPolicy + dialer.Timeout = smtpDialerTimeout - return &SMTPMailer{dialer, sender, clusterName} + return &SMTPMailer{ + dialer: dialer, + sender: sender, + clusterName: clusterName, + sink: sink, + } } // NewMailgunMailer inits new Mailgun mailer -func NewMailgunMailer(c MailgunConfig, sender, clusterName string) Mailer { +func NewMailgunMailer(c MailgunConfig, sink common.StatusSink, sender, clusterName string, fallbackRecipients []string) Mailer { m := mailgun.NewMailgun(c.Domain, c.PrivateKey) if c.APIBase != "" { m.SetAPIBase(c.APIBase) } - return &MailgunMailer{m, sender, clusterName} + client := &http.Client{ + Transport: &statusSinkTransport{ + RoundTripper: http.DefaultTransport, + sink: sink, + }, + } + m.SetClient(client) + return &MailgunMailer{ + mailgun: m, + sender: sender, + clusterName: clusterName, + fallbackRecipients: fallbackRecipients, + } +} + +// CheckHealth checks the health of the SMTP service. 
+func (m *SMTPMailer) CheckHealth(ctx context.Context) error { + log := logger.Get(ctx) + client, err := m.dialer.Dial() + m.emitStatus(ctx, err) + if err != nil { + return trace.Wrap(err) + } + if err := client.Close(); err != nil { + log.Debug("Failed to close client connection after health check") + } + return nil } // Send sends email via SMTP @@ -91,10 +143,10 @@ func (m *SMTPMailer) Send(ctx context.Context, id, recipient, body, references s } err = m.dialer.DialAndSend(msg) + m.emitStatus(ctx, err) if err != nil { return "", trace.Wrap(err) } - return id, nil } @@ -123,6 +175,38 @@ func (m *SMTPMailer) base36(input uint64) string { return strings.ToUpper(strconv.FormatUint(input, 36)) } +// emitStatus emits status based on provided statusErr. +func (m *SMTPMailer) emitStatus(ctx context.Context, statusErr error) { + if m.sink == nil { + return + } + + ctx, cancel := context.WithTimeout(ctx, statusEmitTimeout) + defer cancel() + + log := logger.Get(ctx) + code := http.StatusOK + if statusErr != nil { + // Returned error is undocumented. Using generic error code for all errors. + code = http.StatusInternalServerError + } + if err := m.sink.Emit(ctx, common.StatusFromStatusCode(code)); err != nil { + log.WithError(err).Error("Error while emitting Email plugin status") + } +} + +// CheckHealth checks the health of the Mailgun service. +func (m *MailgunMailer) CheckHealth(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, mailgunHTTPTimeout) + defer cancel() + + msg := m.mailgun.NewMessage(m.sender, "Health Check", "Testing Mailgun API connection...", m.fallbackRecipients...) + msg.SetRequireTLS(true) + msg.EnableTestMode() // Test message submission without delivering to recipients. + _, _, err := m.mailgun.Send(ctx, msg) + return trace.Wrap(err) +} + // Send sends email via Mailgun func (m *MailgunMailer) Send(ctx context.Context, id, recipient, body, references string) (string, error) { subject := fmt.Sprintf("%v Role Request %v", m.clusterName, id) @@ -136,7 +220,7 @@ func (m *MailgunMailer) Send(ctx context.Context, id, recipient, body, reference msg.AddHeader("In-Reply-To", refHeader) } - ctx, cancel := context.WithTimeout(ctx, time.Second*10) + ctx, cancel := context.WithTimeout(ctx, mailgunHTTPTimeout) defer cancel() _, id, err := m.mailgun.Send(ctx, msg) @@ -147,3 +231,29 @@ func (m *MailgunMailer) Send(ctx context.Context, id, recipient, body, reference return id, nil } + +// statusSinkTransport wraps the Mailgun client transport and +// emits plugin status. +type statusSinkTransport struct { + http.RoundTripper + sink common.StatusSink +} + +// RoundTrip implements the http.RoundTripper interface. 
+func (t *statusSinkTransport) RoundTrip(req *http.Request) (*http.Response, error) { + log := logger.Get(req.Context()) + resp, err := t.RoundTripper.RoundTrip(req) + if err != nil { + return nil, trace.Wrap(err) + } + if t.sink != nil { + ctx, cancel := context.WithTimeout(req.Context(), statusEmitTimeout) + defer cancel() + + status := common.StatusFromStatusCode(resp.StatusCode) + if err := t.sink.Emit(ctx, status); err != nil { + log.WithError(err).Error("Error while emitting Email plugin status") + } + } + return resp, nil +} diff --git a/integrations/access/email/testlib/mock_mailgun.go b/integrations/access/email/testlib/mock_mailgun.go index 8b0a856d9bc73..58cbbc8ebb098 100644 --- a/integrations/access/email/testlib/mock_mailgun.go +++ b/integrations/access/email/testlib/mock_mailgun.go @@ -63,6 +63,13 @@ func newMockMailgunServer(concurrency int) *mockMailgunServer { id := uuid.New().String() + // The testmode flag is only used during health check. + // Do no create message when in testmode. + if r.PostFormValue("o:testmode") == "yes" { + fmt.Fprintf(w, `{"id": "%v"}`, id) + return + } + message := mockMailgunMessage{ ID: id, Sender: r.PostFormValue("from"), diff --git a/integrations/terraform/gen/docs.sh b/integrations/terraform/gen/docs.sh index eba48091d57ce..f6570db4b41a3 100755 --- a/integrations/terraform/gen/docs.sh +++ b/integrations/terraform/gen/docs.sh @@ -67,15 +67,15 @@ info "Converting .md files to .mdx" cd "$TMPDIR/docs" find . -iname '*.md' -type f -exec sh -c 'i="$1"; mv "$i" "${i%.md}.mdx"' shell {} \; # renaming the resources and data-sources indexes because the names were reserved by the generator -mv "$TMPDIR/docs/resources-index.mdx" "$TMPDIR/docs/resources.mdx" -mv "$TMPDIR/docs/data-sources-index.mdx" "$TMPDIR/docs/data-sources.mdx" +mv "$TMPDIR/docs/resources-index.mdx" "$TMPDIR/docs/resources/resources.mdx" +mv "$TMPDIR/docs/data-sources-index.mdx" "$TMPDIR/docs/data-sources/data-sources.mdx" info "Copying generated documentation into the teleport docs directory" # Removing the apex terraform.mdx -rm -rf "$DOCSDIR" "$DOCSDIR.mdx" +rm -rf "$DOCSDIR" "$DOCSDIR/terraform-provider.mdx" cp -r "$TMPDIR/docs" "$DOCSDIR" # unpacking the index to the apex terraform.mdx -mv "$DOCSDIR/index.mdx" "$DOCSDIR.mdx" +mv "$DOCSDIR/index.mdx" "$DOCSDIR/terraform-provider.mdx" -info "TF documentation successfully generated" \ No newline at end of file +info "TF documentation successfully generated" diff --git a/integrations/terraform/provider/provider.go b/integrations/terraform/provider/provider.go index dfc0d9b9a14c3..1f1a923a60c91 100644 --- a/integrations/terraform/provider/provider.go +++ b/integrations/terraform/provider/provider.go @@ -247,13 +247,13 @@ func (p *Provider) GetSchema(_ context.Context) (tfsdk.Schema, diag.Diagnostics) Type: types.StringType, Sensitive: false, Optional: true, - Description: fmt.Sprintf("Enables the native Terraform MachineID support. When set, Terraform uses MachineID to securely join the Teleport cluster and obtain credentials. See [the join method reference](./join-methods.mdx) for possible values, you must use [a delegated join method](./join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `%s`.", constants.EnvVarTerraformJoinMethod), + Description: fmt.Sprintf("Enables the native Terraform MachineID support. When set, Terraform uses MachineID to securely join the Teleport cluster and obtain credentials. See [the join method reference](../join-methods.mdx) for possible values. 
You must use [a delegated join method](../join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `%s`.", constants.EnvVarTerraformJoinMethod), }, attributeTerraformJoinToken: { Type: types.StringType, Sensitive: false, Optional: true, - Description: fmt.Sprintf("Name of the token used for the native MachineID joining. This value is not sensitive for [delegated join methods](./join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `%s`.", constants.EnvVarTerraformJoinToken), + Description: fmt.Sprintf("Name of the token used for the native MachineID joining. This value is not sensitive for [delegated join methods](../join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `%s`.", constants.EnvVarTerraformJoinToken), }, attributeTerraformJoinAudienceTag: { Type: types.StringType, diff --git a/integrations/terraform/templates/data-sources-index.mdx.tmpl b/integrations/terraform/templates/data-sources-index.mdx.tmpl index c4c7b90af7525..9eac755076952 100644 --- a/integrations/terraform/templates/data-sources-index.mdx.tmpl +++ b/integrations/terraform/templates/data-sources-index.mdx.tmpl @@ -13,5 +13,5 @@ description: "Index of all the data-sources supported by the Teleport Terraform The Teleport Terraform provider supports the following data-sources: {{ range $key, $value := .DataSourceFiles }} - - [`{{$key}}`](./data-sources/{{$value}}.mdx) + - [`{{$key}}`](./{{$value}}.mdx) {{- end }} diff --git a/integrations/terraform/templates/index.md.tmpl b/integrations/terraform/templates/index.md.tmpl index 15bc1c7c81fa5..488665209f78a 100644 --- a/integrations/terraform/templates/index.md.tmpl +++ b/integrations/terraform/templates/index.md.tmpl @@ -14,10 +14,10 @@ It lists all the supported resources and their fields. To get started with the Terraform provider, you must start with [the installation -guide](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx). +guide](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx). Once you got a working provider, we recommend you to follow the ["Managing users and roles with IaC"]( -../admin-guides/infrastructure-as-code/managing-resources/user-and-role.mdx) guide. +../../admin-guides/infrastructure-as-code/managing-resources/user-and-role.mdx) guide. The provider exposes Teleport resources both as Terraform data-sources and Terraform resources. @@ -27,8 +27,8 @@ to create resources in Teleport. {/* Note: the awkward `resource-index` file names are here because `data-sources` is reserved by the generator for the catch-all resource template */} -- [list of supported resources](./terraform-provider/resources.mdx) -- [list of supported data-sources](./terraform-provider/data-sources.mdx) +- [list of supported resources](./resources/resources.mdx) +- [list of supported data-sources](./data-sources/data-sources.mdx) ## Example Usage @@ -81,7 +81,7 @@ provider "teleport" { This section lists the different ways of passing credentials to the Terraform provider. 
You can find which method fits your use case in the [Teleport Terraform provider setup -page](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) +page](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) ### With an identity file @@ -108,16 +108,16 @@ Detected security key tap ``` You can find more information in -the ["Run the Terraform provider locally" guide](../admin-guides/infrastructure-as-code/terraform-provider/local.mdx) +the ["Run the Terraform provider locally" guide](../../admin-guides/infrastructure-as-code/terraform-provider/local.mdx) #### Obtaining an identity file via `tbot` -`tbot` relies on [MachineID](../enroll-resources/machine-id/introduction.mdx) to obtain and automatically renew +`tbot` relies on [MachineID](../../enroll-resources/machine-id/introduction.mdx) to obtain and automatically renew short-lived credentials. Such credentials are harder to exfiltrate, and you can control more precisely who has access to which roles (e.g. you can allow only GitHub Actions pipelines targeting the `prod` environment to get certificates). You can follow [the Terraform Provider -guide](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) to setup `tbot` +guide](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) to setup `tbot` and have Terraform use its identity. #### Obtaining an identity file via `tctl auth sign` @@ -132,7 +132,7 @@ This auth method has the following limitations: - Such credentials are high-privileged and long-lived. They must be protected and rotated. - This auth method does not work against Teleport clusters with MFA set to `webauthn`. On such clusters, Teleport will reject any long-lived certificate and require - [an additional MFA challenge for administrative actions](../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx). + [an additional MFA challenge for administrative actions](../../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx). ### With a token (native MachineID) @@ -140,11 +140,11 @@ Starting with 16.2, the Teleport Terraform provider can natively use MachineID ( cluster. The Terraform Provider will rely on its runtime (AWS, GCP, Kubernetes, CI/CD system) to prove its identity to Teleport. -You can use any [delegated join method](./join-methods.mdx#delegated-join-methods) by setting +You can use any [delegated join method](../join-methods.mdx#delegated-join-methods) by setting both `join_method` and `join_token` in the provider configuration. This setup is described in more details in -the ["Run the Teleport Terraform provider in CI or Cloud" guide](../admin-guides/infrastructure-as-code/terraform-provider/ci-or-cloud.mdx). +the ["Run the Teleport Terraform provider in CI or Cloud" guide](../../admin-guides/infrastructure-as-code/terraform-provider/ci-or-cloud.mdx). ### With key, certificate, and CA certificate @@ -160,7 +160,7 @@ This auth method has the following limitations: - Such credentials are high-privileged and long-lived. They must be protected and rotated. - This auth method does not work against Teleport clusters with MFA set to `webauthn`. On such clusters, Teleport will reject any long-lived certificate and require - [an additional MFA challenge for administrative actions](../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx). + [an additional MFA challenge for administrative actions](../../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx). 
{{ .SchemaMarkdown | trimspace }} diff --git a/integrations/terraform/templates/resources-index.mdx.tmpl b/integrations/terraform/templates/resources-index.mdx.tmpl index 42f5821dfbca9..00167441cf03b 100644 --- a/integrations/terraform/templates/resources-index.mdx.tmpl +++ b/integrations/terraform/templates/resources-index.mdx.tmpl @@ -13,5 +13,5 @@ description: "Index of all the datasources supported by the Teleport Terraform P The Teleport Terraform provider supports the following resources: {{ range $key, $value := .ResourceFiles }} - - [`{{$key}}`](./resources/{{$value}}.mdx) + - [`{{$key}}`](./{{$value}}.mdx) {{- end }} diff --git a/lib/auth/accesspoint/accesspoint.go b/lib/auth/accesspoint/accesspoint.go index 5b0d4b6084f07..d9ac852bba65b 100644 --- a/lib/auth/accesspoint/accesspoint.go +++ b/lib/auth/accesspoint/accesspoint.go @@ -103,6 +103,7 @@ type Config struct { Users services.UsersService WebSession types.WebSessionInterface WebToken types.WebTokenInterface + DynamicWindowsDesktops services.DynamicWindowsDesktops WindowsDesktops services.WindowsDesktops AutoUpdateService services.AutoUpdateServiceGetter ProvisioningStates services.ProvisioningStates @@ -201,6 +202,7 @@ func NewCache(cfg Config) (*cache.Cache, error) { WebSession: cfg.WebSession, WebToken: cfg.WebToken, WindowsDesktops: cfg.WindowsDesktops, + DynamicWindowsDesktops: cfg.DynamicWindowsDesktops, ProvisioningStates: cfg.ProvisioningStates, IdentityCenter: cfg.IdentityCenter, } diff --git a/lib/auth/dynamicwindows/dynamicwindowsv1/service.go b/lib/auth/dynamicwindows/dynamicwindowsv1/service.go index 98bc2f81e6b55..5a42eefe8edca 100644 --- a/lib/auth/dynamicwindows/dynamicwindowsv1/service.go +++ b/lib/auth/dynamicwindows/dynamicwindowsv1/service.go @@ -196,6 +196,31 @@ func (s *Service) UpdateDynamicWindowsDesktop(ctx context.Context, req *dynamicw return updatedDesktop, nil } +// UpsertDynamicWindowsDesktop updates an existing dynamic Windows desktop or creates one if it doesn't exist. +func (s *Service) UpsertDynamicWindowsDesktop(ctx context.Context, req *dynamicwindowspb.UpsertDynamicWindowsDesktopRequest) (*types.DynamicWindowsDesktopV1, error) { + auth, err := s.authorizer.Authorize(ctx) + if err != nil { + return nil, trace.Wrap(err) + } + if err := auth.AuthorizeAdminAction(); err != nil { + return nil, trace.Wrap(err) + } + if err := auth.CheckAccessToKind(types.KindDynamicWindowsDesktop, types.VerbCreate, types.VerbUpdate); err != nil { + return nil, trace.Wrap(err) + } + d, err := s.backend.UpsertDynamicWindowsDesktop(ctx, req.Desktop) + if err != nil { + return nil, trace.Wrap(err) + } + + updatedDesktop, ok := d.(*types.DynamicWindowsDesktopV1) + if !ok { + return nil, trace.BadParameter("unexpected type %T", d) + } + + return updatedDesktop, nil +} + // DeleteDynamicWindowsDesktop removes the specified dynamic Windows desktop. 
func (s *Service) DeleteDynamicWindowsDesktop(ctx context.Context, req *dynamicwindowspb.DeleteDynamicWindowsDesktopRequest) (*emptypb.Empty, error) { auth, err := s.authorizer.Authorize(ctx) diff --git a/lib/auth/dynamicwindows/dynamicwindowsv1/service_test.go b/lib/auth/dynamicwindows/dynamicwindowsv1/service_test.go index 8fee09af10dfb..1c474f7192be9 100644 --- a/lib/auth/dynamicwindows/dynamicwindowsv1/service_test.go +++ b/lib/auth/dynamicwindows/dynamicwindowsv1/service_test.go @@ -55,6 +55,11 @@ func TestServiceAccess(t *testing.T) { allowedStates: []authz.AdminActionAuthState{authz.AdminActionAuthNotRequired, authz.AdminActionAuthMFAVerified}, allowedVerbs: []string{types.VerbUpdate}, }, + { + name: "UpsertDynamicWindowsDesktop", + allowedStates: []authz.AdminActionAuthState{authz.AdminActionAuthNotRequired, authz.AdminActionAuthMFAVerified}, + allowedVerbs: []string{types.VerbCreate, types.VerbUpdate}, + }, { name: "DeleteDynamicWindowsDesktop", allowedStates: []authz.AdminActionAuthState{authz.AdminActionAuthNotRequired, authz.AdminActionAuthMFAVerified}, @@ -160,6 +165,10 @@ func callMethod(service *Service, method string) error { arg.Desktop, _ = types.NewDynamicWindowsDesktopV1("test", nil, types.DynamicWindowsDesktopSpecV1{ Addr: "test", }) + case *dynamicwindowsv1.UpsertDynamicWindowsDesktopRequest: + arg.Desktop, _ = types.NewDynamicWindowsDesktopV1("test", nil, types.DynamicWindowsDesktopSpecV1{ + Addr: "test", + }) } return nil }, nil) diff --git a/lib/auth/helpers.go b/lib/auth/helpers.go index 851cca043ad92..9553f2697cf8c 100644 --- a/lib/auth/helpers.go +++ b/lib/auth/helpers.go @@ -366,6 +366,7 @@ func NewTestAuthServer(cfg TestAuthServerConfig) (*TestAuthServer, error) { WebSession: svces.Identity.WebSessions(), WebToken: svces.WebTokens(), WindowsDesktops: svces.WindowsDesktops, + DynamicWindowsDesktops: svces.DynamicWindowsDesktops, }) if err != nil { return nil, trace.Wrap(err) diff --git a/lib/auth/integration/integrationv1/service_test.go b/lib/auth/integration/integrationv1/service_test.go index 0fd6355ebb8f5..3316cf239d0e8 100644 --- a/lib/auth/integration/integrationv1/service_test.go +++ b/lib/auth/integration/integrationv1/service_test.go @@ -280,6 +280,29 @@ func TestIntegrationCRUD(t *testing.T) { }, ErrAssertion: trace.IsBadParameter, }, + { + Name: "can't delete integration referenced by AWS Identity Center plugin", + Role: types.RoleSpecV6{ + Allow: types.RoleConditions{Rules: []types.Rule{{ + Resources: []string{types.KindIntegration}, + Verbs: []string{types.VerbDelete}, + }}}, + }, + Setup: func(t *testing.T, igName string) { + _, err := localClient.CreateIntegration(ctx, sampleIntegrationFn(t, igName)) + require.NoError(t, err) + require.NoError(t, localClient.CreatePlugin(ctx, newPlugin(t, igName))) + }, + Test: func(ctx context.Context, resourceSvc *Service, igName string) error { + _, err := resourceSvc.DeleteIntegration(ctx, &integrationpb.DeleteIntegrationRequest{Name: igName}) + return err + }, + Cleanup: func(t *testing.T, igName string) { + err := localClient.DeletePlugin(ctx, newPlugin(t, igName).GetName()) + require.NoError(t, err) + }, + ErrAssertion: trace.IsBadParameter, + }, { Name: "access to delete integration", Role: types.RoleSpecV6{ @@ -369,6 +392,8 @@ type localClient interface { DeleteDraftExternalAuditStorage(ctx context.Context) error PromoteToClusterExternalAuditStorage(ctx context.Context) error DisableClusterExternalAuditStorage(ctx context.Context) error + CreatePlugin(ctx context.Context, plugin types.Plugin) error + 
DeletePlugin(ctx context.Context, name string) error } type testClient struct { @@ -390,6 +415,7 @@ func initSvc(t *testing.T, ca types.CertAuthority, clusterName string, proxyPubl userSvc, err := local.NewTestIdentityService(backend) require.NoError(t, err) easSvc := local.NewExternalAuditStorageService(backend) + pluginSvc := local.NewPluginsService(backend) _, err = clusterConfigSvc.UpsertAuthPreference(ctx, types.DefaultAuthPreference()) require.NoError(t, err) @@ -455,11 +481,13 @@ func initSvc(t *testing.T, ca types.CertAuthority, clusterName string, proxyPubl *local.IdentityService *local.ExternalAuditStorageService *local.IntegrationsService + *local.PluginsService }{ AccessService: roleSvc, IdentityService: userSvc, ExternalAuditStorageService: easSvc, IntegrationsService: localResourceService, + PluginsService: pluginSvc, }, resourceSvc } @@ -525,3 +553,28 @@ func newCertAuthority(t *testing.T, caType types.CertAuthType, domain string) ty return ca } + +func newPlugin(t *testing.T, integrationName string) *types.PluginV1 { + t.Helper() + return &types.PluginV1{ + Metadata: types.Metadata{ + Name: types.PluginTypeAWSIdentityCenter, + Labels: map[string]string{ + types.HostedPluginLabel: "true", + }, + }, + Spec: types.PluginSpecV1{ + Settings: &types.PluginSpecV1_AwsIc{ + AwsIc: &types.PluginAWSICSettings{ + IntegrationName: integrationName, + Region: "test-region", + Arn: "test-arn", + AccessListDefaultOwners: []string{"user1", "user2"}, + ProvisioningSpec: &types.AWSICProvisioningSpec{ + BaseUrl: "https://example.com", + }, + }, + }, + }, + } +} diff --git a/lib/auth/trustedcluster.go b/lib/auth/trustedcluster.go index bd68f9d10832e..acbc46dc4f281 100644 --- a/lib/auth/trustedcluster.go +++ b/lib/auth/trustedcluster.go @@ -45,129 +45,115 @@ import ( ) // UpsertTrustedCluster creates or toggles a Trusted Cluster relationship. -func (a *Server) UpsertTrustedCluster(ctx context.Context, trustedCluster types.TrustedCluster) (newTrustedCluster types.TrustedCluster, returnErr error) { +func (a *Server) UpsertTrustedCluster(ctx context.Context, tc types.TrustedCluster) (newTrustedCluster types.TrustedCluster, returnErr error) { + // verify that trusted cluster role map does not reference non-existent roles + if err := a.checkLocalRoles(ctx, tc.GetRoleMap()); err != nil { + return nil, trace.Wrap(err) + } + // It is recommended to omit trusted cluster name because the trusted cluster name // is updated to the roots cluster name during the handshake with the root cluster. var existingCluster types.TrustedCluster - if trustedCluster.GetName() != "" { + var cas []types.CertAuthority + if tc.GetName() != "" { var err error - existingCluster, err = a.GetTrustedCluster(ctx, trustedCluster.GetName()) + existingCluster, err = a.GetTrustedCluster(ctx, tc.GetName()) if err != nil && !trace.IsNotFound(err) { return nil, trace.Wrap(err) } } - enable := trustedCluster.GetEnabled() - - // If the trusted cluster already exists in the backend, make sure it's a - // valid state change client is trying to make. 
- if existingCluster != nil { - if err := existingCluster.CanChangeStateTo(trustedCluster); err != nil { - return nil, trace.Wrap(err) - } + // if there is no existing cluster, switch to the create case + if existingCluster == nil { + return a.createTrustedCluster(ctx, tc) } - logger := log.WithField("trusted_cluster", trustedCluster.GetName()) + if err := existingCluster.CanChangeStateTo(tc); err != nil { + return nil, trace.Wrap(err) + } - // change state - if err := a.checkLocalRoles(ctx, trustedCluster.GetRoleMap()); err != nil { + // always load all current CAs. even if we aren't changing them as part of + // this function, Services.UpdateTrustedCluster will only correctly activate/deactivate + // CAs that are explicitly passed to it. note that we pass in the existing cluster state + // since where CAs are stored depends on the current state of the trusted cluster. + cas, err := a.getCAsForTrustedCluster(ctx, existingCluster) + if err != nil { return nil, trace.Wrap(err) } - // Update role map - if existingCluster != nil && !existingCluster.GetRoleMap().IsEqual(trustedCluster.GetRoleMap()) { - if err := a.UpdateUserCARoleMap(ctx, existingCluster.GetName(), trustedCluster.GetRoleMap(), - existingCluster.GetEnabled()); err != nil { - return nil, trace.Wrap(err) - } + // propagate any role map changes to cas + configureCAsForTrustedCluster(tc, cas) - // Reset previous UserCA role map if this func fails later on - defer func() { - if returnErr != nil { - if err := a.UpdateUserCARoleMap(ctx, trustedCluster.GetName(), existingCluster.GetRoleMap(), - trustedCluster.GetEnabled()); err != nil { - returnErr = trace.NewAggregate(err, returnErr) - } - } - }() - } - // Create or update state - switch { - case existingCluster != nil && enable == true: - if existingCluster.GetEnabled() { - break - } - log.Debugf("Enabling existing Trusted Cluster relationship.") + // state transition is valid, set the expected revision + tc.SetRevision(existingCluster.GetRevision()) - if err := a.activateCertAuthority(ctx, trustedCluster); err != nil { - if trace.IsNotFound(err) { - return nil, trace.BadParameter("enable only supported for Trusted Clusters created with Teleport 2.3 and above") - } - return nil, trace.Wrap(err) - } + revision, err := a.Services.UpdateTrustedCluster(ctx, tc, cas) + if err != nil { + return nil, trace.Wrap(err) + } - if err := a.createReverseTunnel(ctx, trustedCluster); err != nil { - return nil, trace.Wrap(err) - } - case existingCluster != nil && enable == false: - if !existingCluster.GetEnabled() { - break - } - log.Debugf("Disabling existing Trusted Cluster relationship.") + tc.SetRevision(revision) - if err := a.deactivateCertAuthority(ctx, trustedCluster); err != nil { - if trace.IsNotFound(err) { - return nil, trace.BadParameter("enable only supported for Trusted Clusters created with Teleport 2.3 and above") - } - return nil, trace.Wrap(err) - } + if err := a.onTrustedClusterWrite(ctx, tc); err != nil { + return nil, trace.Wrap(err) + } - if err := a.DeleteReverseTunnel(ctx, trustedCluster.GetName()); err != nil { - return nil, trace.Wrap(err) - } - case existingCluster == nil && enable == true: - logger.Info("Creating enabled Trusted Cluster relationship.") + return tc, nil +} - remoteCAs, err := a.establishTrust(ctx, trustedCluster) - if err != nil { - return nil, trace.Wrap(err) - } +func (a *Server) createTrustedCluster(ctx context.Context, tc types.TrustedCluster) (types.TrustedCluster, error) { + remoteCAs, err := a.establishTrust(ctx, tc) + if err != nil { + return nil, 
trace.Wrap(err) + } - // Force name of the trusted cluster resource - // to be equal to the name of the remote cluster it is connecting to. - trustedCluster.SetName(remoteCAs[0].GetClusterName()) + // Force name to the name of the trusted cluster. + tc.SetName(remoteCAs[0].GetClusterName()) - if err := a.addCertAuthorities(ctx, trustedCluster, remoteCAs); err != nil { - return nil, trace.Wrap(err) - } + // perform some configuration on the remote CAs + configureCAsForTrustedCluster(tc, remoteCAs) - if err := a.createReverseTunnel(ctx, trustedCluster); err != nil { - return nil, trace.Wrap(err) - } + // atomically create trusted cluster and cert authorities + revision, err := a.Services.CreateTrustedCluster(ctx, tc, remoteCAs) + if err != nil { + return nil, trace.Wrap(err) + } - case existingCluster == nil && enable == false: - logger.Info("Creating disabled Trusted Cluster relationship.") + tc.SetRevision(revision) - remoteCAs, err := a.establishTrust(ctx, trustedCluster) - if err != nil { - return nil, trace.Wrap(err) - } + if err := a.onTrustedClusterWrite(ctx, tc); err != nil { + return nil, trace.Wrap(err) + } - // Force name to the name of the trusted cluster. - trustedCluster.SetName(remoteCAs[0].GetClusterName()) + return tc, nil +} - if err := a.addCertAuthorities(ctx, trustedCluster, remoteCAs); err != nil { - return nil, trace.Wrap(err) - } +// configureCAsForTrustedCluster modifies remote CAs for use as trusted cluster CAs. +func configureCAsForTrustedCluster(tc types.TrustedCluster, cas []types.CertAuthority) { + // modify the remote CAs for use as tc cas. + for _, ca := range cas { + // change the name of the remote ca to the name of the trusted cluster. + ca.SetName(tc.GetName()) - if err := a.deactivateCertAuthority(ctx, trustedCluster); err != nil { - return nil, trace.Wrap(err) + // wipe out roles sent from the remote cluster and set roles from the trusted cluster + ca.SetRoles(nil) + if ca.GetType() == types.UserCA { + for _, r := range tc.GetRoles() { + ca.AddRole(r) + } + ca.SetRoleMap(tc.GetRoleMap()) } } +} - tc, err := a.Services.UpsertTrustedCluster(ctx, trustedCluster) - if err != nil { - return nil, trace.Wrap(err) +func (a *Server) onTrustedClusterWrite(ctx context.Context, tc types.TrustedCluster) error { + var cerr error + if tc.GetEnabled() { + cerr = a.createReverseTunnel(ctx, tc) + } else { + if err := a.DeleteReverseTunnel(ctx, tc.GetName()); err != nil && !trace.IsNotFound(err) { + cerr = err + } } if err := a.emitter.EmitAuditEvent(ctx, &apievents.TrustedClusterCreate{ @@ -177,14 +163,14 @@ func (a *Server) UpsertTrustedCluster(ctx context.Context, trustedCluster types. 
}, UserMetadata: authz.ClientUserMetadata(ctx), ResourceMetadata: apievents.ResourceMetadata{ - Name: trustedCluster.GetName(), + Name: tc.GetName(), }, ConnectionMetadata: authz.ConnectionMetadata(ctx), }); err != nil { - logger.WithError(err).Warn("Failed to emit trusted cluster create event.") + a.logger.WarnContext(ctx, "failed to emit trusted cluster create event", "error", err) } - return tc, nil + return trace.Wrap(cerr) } func (a *Server) checkLocalRoles(ctx context.Context, roleMap types.RoleMap) error { @@ -207,6 +193,29 @@ func (a *Server) checkLocalRoles(ctx context.Context, roleMap types.RoleMap) err return nil } +func (a *Server) getCAsForTrustedCluster(ctx context.Context, tc types.TrustedCluster) ([]types.CertAuthority, error) { + var cas []types.CertAuthority + // not all CA types are present for trusted clusters, but there isn't a meaningful downside to + // just grabbing everything. + for _, caType := range types.CertAuthTypes { + var ca types.CertAuthority + var err error + if tc.GetEnabled() { + ca, err = a.GetCertAuthority(ctx, types.CertAuthID{Type: caType, DomainName: tc.GetName()}, false) + } else { + ca, err = a.GetInactiveCertAuthority(ctx, types.CertAuthID{Type: caType, DomainName: tc.GetName()}, false) + } + if err != nil { + if trace.IsNotFound(err) { + continue + } + return nil, trace.Wrap(err) + } + cas = append(cas, ca) + } + return cas, nil +} + // DeleteTrustedCluster removes types.CertAuthority, services.ReverseTunnel, // and services.TrustedCluster resources. func (a *Server) DeleteTrustedCluster(ctx context.Context, name string) error { @@ -229,7 +238,7 @@ func (a *Server) DeleteTrustedCluster(ctx context.Context, name string) error { }) } - if err := a.DeleteCertAuthorities(ctx, ids...); err != nil { + if err := a.Services.DeleteTrustedClusterInternal(ctx, name, ids); err != nil { return trace.Wrap(err) } @@ -239,10 +248,6 @@ func (a *Server) DeleteTrustedCluster(ctx context.Context, name string) error { } } - if err := a.Services.DeleteTrustedCluster(ctx, name); err != nil { - return trace.Wrap(err) - } - if err := a.emitter.EmitAuditEvent(ctx, &apievents.TrustedClusterDelete{ Metadata: apievents.Metadata{ Type: events.TrustedClusterDeleteEvent, @@ -324,54 +329,30 @@ func (a *Server) establishTrust(ctx context.Context, trustedCluster types.Truste return validateResponse.CAs, nil } -func (a *Server) addCertAuthorities(ctx context.Context, trustedCluster types.TrustedCluster, remoteCAs []types.CertAuthority) error { - // the remote auth server has verified our token. add the - // remote certificate authority to our backend - for _, remoteCertAuthority := range remoteCAs { - // change the name of the remote ca to the name of the trusted cluster - remoteCertAuthority.SetName(trustedCluster.GetName()) - - // wipe out roles sent from the remote cluster and set roles from the trusted cluster - remoteCertAuthority.SetRoles(nil) - if remoteCertAuthority.GetType() == types.UserCA { - for _, r := range trustedCluster.GetRoles() { - remoteCertAuthority.AddRole(r) - } - remoteCertAuthority.SetRoleMap(trustedCluster.GetRoleMap()) - } - } - - // we use create here instead of upsert to prevent people from wiping out - // their own ca if it has the same name as the remote ca - _, err := a.CreateCertAuthorities(ctx, remoteCAs...) 
- return trace.Wrap(err) -} - // DeleteRemoteCluster deletes remote cluster resource, all certificate authorities // associated with it -func (a *Server) DeleteRemoteCluster(ctx context.Context, clusterName string) error { - // To make sure remote cluster exists - to protect against random - // clusterName requests (e.g. when clusterName is set to local cluster name) - if _, err := a.GetRemoteCluster(ctx, clusterName); err != nil { +func (a *Server) DeleteRemoteCluster(ctx context.Context, name string) error { + cn, err := a.GetClusterName() + if err != nil { return trace.Wrap(err) } + // This check ensures users are not deleting their root/own cluster. + if cn.GetClusterName() == name { + return trace.BadParameter("remote cluster %q is the name of this root cluster and cannot be removed.", name) + } + // we only expect host CAs to be present for remote clusters, but it doesn't hurt // to err on the side of paranoia and delete all CA types. var ids []types.CertAuthID for _, caType := range types.CertAuthTypes { ids = append(ids, types.CertAuthID{ Type: caType, - DomainName: clusterName, + DomainName: name, }) } - // delete cert authorities associated with the cluster - if err := a.DeleteCertAuthorities(ctx, ids...); err != nil { - return trace.Wrap(err) - } - - return trace.Wrap(a.Services.DeleteRemoteCluster(ctx, clusterName)) + return trace.Wrap(a.Services.DeleteRemoteClusterInternal(ctx, name, ids)) } // GetRemoteCluster returns remote cluster by name @@ -497,12 +478,6 @@ func (a *Server) validateTrustedCluster(ctx context.Context, validateRequest *au if remoteClusterName == domainName { return nil, trace.AccessDenied("remote cluster has same name as this cluster: %v", domainName) } - _, err = a.GetTrustedCluster(ctx, remoteClusterName) - if err == nil { - return nil, trace.AccessDenied("remote cluster has same name as trusted cluster: %v", remoteClusterName) - } else if !trace.IsNotFound(err) { - return nil, trace.Wrap(err) - } remoteCluster, err := types.NewRemoteCluster(remoteClusterName) if err != nil { @@ -522,15 +497,8 @@ func (a *Server) validateTrustedCluster(ctx context.Context, validateRequest *au } remoteCluster.SetConnectionStatus(teleport.RemoteClusterStatusOffline) - _, err = a.CreateRemoteCluster(ctx, remoteCluster) - if err != nil { - if !trace.IsAlreadyExists(err) { - return nil, trace.Wrap(err) - } - } - - err = a.UpsertCertAuthority(ctx, remoteCA) - if err != nil { + _, err = a.CreateRemoteClusterInternal(ctx, remoteCluster, []types.CertAuthority{remoteCA}) + if err != nil && !trace.IsAlreadyExists(err) { return nil, trace.Wrap(err) } @@ -641,36 +609,6 @@ func (a *Server) sendValidateRequestToProxy(host string, validateRequest *authcl return validateResponse, nil } -// activateCertAuthority will activate both the user and host certificate -// authority given in the services.TrustedCluster resource. -func (a *Server) activateCertAuthority(ctx context.Context, t types.TrustedCluster) error { - return trace.Wrap(a.ActivateCertAuthorities(ctx, []types.CertAuthID{ - { - Type: types.UserCA, - DomainName: t.GetName(), - }, - { - Type: types.HostCA, - DomainName: t.GetName(), - }, - }...)) -} - -// deactivateCertAuthority will deactivate both the user and host certificate -// authority given in the services.TrustedCluster resource. 
-func (a *Server) deactivateCertAuthority(ctx context.Context, t types.TrustedCluster) error { - return trace.Wrap(a.DeactivateCertAuthorities(ctx, []types.CertAuthID{ - { - Type: types.UserCA, - DomainName: t.GetName(), - }, - { - Type: types.HostCA, - DomainName: t.GetName(), - }, - }...)) -} - // createReverseTunnel will create a services.ReverseTunnel givenin the // services.TrustedCluster resource. func (a *Server) createReverseTunnel(ctx context.Context, t types.TrustedCluster) error { diff --git a/lib/auth/trustedcluster_test.go b/lib/auth/trustedcluster_test.go index ba7ffac769b62..f1581dbc64fee 100644 --- a/lib/auth/trustedcluster_test.go +++ b/lib/auth/trustedcluster_test.go @@ -469,22 +469,11 @@ func TestUpsertTrustedCluster(t *testing.T) { }) require.NoError(t, err) - leafClusterCA := types.CertAuthority(suite.NewTestCA(types.HostCA, "trustedcluster")) - _, err = a.validateTrustedCluster(ctx, &authclient.ValidateTrustedClusterRequest{ - Token: validToken, - CAs: []types.CertAuthority{leafClusterCA}, - TeleportVersion: teleport.Version, - }) - require.NoError(t, err) - - _, err = a.Services.UpsertTrustedCluster(ctx, trustedCluster) - require.NoError(t, err) - ca := suite.NewTestCA(types.UserCA, "trustedcluster") - err = a.addCertAuthorities(ctx, trustedCluster, []types.CertAuthority{ca}) - require.NoError(t, err) - err = a.UpsertCertAuthority(ctx, ca) + configureCAsForTrustedCluster(trustedCluster, []types.CertAuthority{ca}) + + _, err = a.Services.CreateTrustedCluster(ctx, trustedCluster, []types.CertAuthority{ca}) require.NoError(t, err) err = a.createReverseTunnel(ctx, trustedCluster) diff --git a/lib/autoupdate/agent/testdata/TestUpdater_Enable/FIPS_and_Enterprise_flags.golden b/lib/autoupdate/agent/testdata/TestUpdater_Enable/FIPS_and_Enterprise_flags.golden new file mode 100644 index 0000000000000..d9e09a2c95d71 --- /dev/null +++ b/lib/autoupdate/agent/testdata/TestUpdater_Enable/FIPS_and_Enterprise_flags.golden @@ -0,0 +1,10 @@ +version: v1 +kind: update_config +spec: + proxy: localhost + group: "" + url_template: "" + enabled: true +status: + active_version: 16.3.0 + backup_version: "" diff --git a/lib/autoupdate/agent/updater.go b/lib/autoupdate/agent/updater.go index ade0704607cb9..7071f16e42d15 100644 --- a/lib/autoupdate/agent/updater.go +++ b/lib/autoupdate/agent/updater.go @@ -240,20 +240,26 @@ func (u *Updater) Enable(ctx context.Context, override OverrideConfig) error { } desiredVersion := override.ForceVersion + var flags InstallFlags if desiredVersion == "" { resp, err := webclient.Find(&webclient.Config{ - Context: ctx, - ProxyAddr: addr.Addr, - Insecure: u.InsecureSkipVerify, - Timeout: 30 * time.Second, - //Group: cfg.Spec.Group, // TODO(sclevine): add web API for verssion - Pool: u.Pool, + Context: ctx, + ProxyAddr: addr.Addr, + Insecure: u.InsecureSkipVerify, + Timeout: 30 * time.Second, + UpdateGroup: cfg.Spec.Group, + Pool: u.Pool, }) if err != nil { return trace.Errorf("failed to request version from proxy: %w", err) } - desiredVersion, _ = "16.3.0", resp // TODO(sclevine): add web API for version - //desiredVersion := resp.AutoUpdate.AgentVersion + desiredVersion = resp.AutoUpdate.AgentVersion + if resp.Edition == "ent" { + flags |= FlagEnterprise + } + if resp.FIPS { + flags |= FlagFIPS + } } if desiredVersion == "" { @@ -277,7 +283,7 @@ func (u *Updater) Enable(ctx context.Context, override OverrideConfig) error { if template == "" { template = cdnURITemplate } - err = u.Installer.Install(ctx, desiredVersion, template, 0) // TODO(sclevine): add web 
API for flags + err = u.Installer.Install(ctx, desiredVersion, template, flags) if err != nil { return trace.Errorf("failed to install: %w", err) } diff --git a/lib/autoupdate/agent/updater_test.go b/lib/autoupdate/agent/updater_test.go index d6d0128316c20..e817851fed1f7 100644 --- a/lib/autoupdate/agent/updater_test.go +++ b/lib/autoupdate/agent/updater_test.go @@ -20,6 +20,7 @@ package agent import ( "context" + "encoding/json" "errors" "net/http" "net/http/httptest" @@ -33,6 +34,7 @@ import ( "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" + "github.com/gravitational/teleport/api/client/webclient" "github.com/gravitational/teleport/lib/utils/golden" ) @@ -129,10 +131,12 @@ func TestUpdater_Enable(t *testing.T) { cfg *UpdateConfig // nil -> file not present userCfg OverrideConfig installErr error + flags InstallFlags removedVersion string installedVersion string installedTemplate string + requestGroup string errMatch string }{ { @@ -150,6 +154,7 @@ func TestUpdater_Enable(t *testing.T) { }, installedVersion: "16.3.0", installedTemplate: "https://example.com", + requestGroup: "group", }, { name: "config from user", @@ -255,6 +260,12 @@ func TestUpdater_Enable(t *testing.T) { installedVersion: "16.3.0", installedTemplate: cdnURITemplate, }, + { + name: "FIPS and Enterprise flags", + flags: FlagEnterprise | FlagFIPS, + installedVersion: "16.3.0", + installedTemplate: cdnURITemplate, + }, { name: "invalid metadata", cfg: &UpdateConfig{}, @@ -276,9 +287,20 @@ func TestUpdater_Enable(t *testing.T) { require.NoError(t, err) } + var requestedGroup string server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // TODO(sclevine): add web API test including group verification - w.Write([]byte(`{}`)) + requestedGroup = r.URL.Query().Get("group") + config := webclient.PingResponse{ + AutoUpdate: webclient.AutoUpdateSettings{ + AgentVersion: "16.3.0", + }, + } + if tt.flags&FlagEnterprise != 0 { + config.Edition = "ent" + } + config.FIPS = tt.flags&FlagFIPS != 0 + err := json.NewEncoder(w).Encode(config) + require.NoError(t, err) })) t.Cleanup(server.Close) @@ -297,11 +319,13 @@ func TestUpdater_Enable(t *testing.T) { installedTemplate string linkedVersion string removedVersion string + installedFlags InstallFlags ) updater.Installer = &testInstaller{ - FuncInstall: func(_ context.Context, version, template string, _ InstallFlags) error { + FuncInstall: func(_ context.Context, version, template string, flags InstallFlags) error { installedVersion = version installedTemplate = template + installedFlags = flags return tt.installErr }, FuncLink: func(_ context.Context, version string) error { @@ -329,6 +353,8 @@ func TestUpdater_Enable(t *testing.T) { require.Equal(t, tt.installedTemplate, installedTemplate) require.Equal(t, tt.installedVersion, linkedVersion) require.Equal(t, tt.removedVersion, removedVersion) + require.Equal(t, tt.flags, installedFlags) + require.Equal(t, tt.requestGroup, requestedGroup) data, err := os.ReadFile(cfgPath) require.NoError(t, err) diff --git a/lib/cache/cache_test.go b/lib/cache/cache_test.go index 9c11b4f3a1145..7f3e6144cf79a 100644 --- a/lib/cache/cache_test.go +++ b/lib/cache/cache_test.go @@ -121,6 +121,7 @@ type testPack struct { webSessionS types.WebSessionInterface webTokenS types.WebTokenInterface windowsDesktops services.WindowsDesktops + dynamicWindowsDesktops services.DynamicWindowsDesktops samlIDPServiceProviders services.SAMLIdPServiceProviders userGroups services.UserGroups okta services.Okta @@ -269,6 
+270,11 @@ func newPackWithoutCache(dir string, opts ...packOption) (*testPack, error) { return nil, trace.Wrap(err) } + dynamicWindowsDesktopService, err := local.NewDynamicWindowsDesktopService(p.backend) + if err != nil { + return nil, trace.Wrap(err) + } + p.trustS = local.NewCAService(p.backend) p.clusterConfigS = clusterConfig p.provisionerS = local.NewProvisioningService(p.backend) @@ -288,6 +294,7 @@ func newPackWithoutCache(dir string, opts ...packOption) (*testPack, error) { p.databases = local.NewDatabasesService(p.backend) p.databaseServices = local.NewDatabaseServicesService(p.backend) p.windowsDesktops = local.NewWindowsDesktopService(p.backend) + p.dynamicWindowsDesktops = dynamicWindowsDesktopService p.samlIDPServiceProviders, err = local.NewSAMLIdPServiceProviderService(p.backend) if err != nil { return nil, trace.Wrap(err) @@ -428,6 +435,7 @@ func newPack(dir string, setupConfig func(c Config) Config, opts ...packOption) DatabaseServices: p.databaseServices, Databases: p.databases, WindowsDesktops: p.windowsDesktops, + DynamicWindowsDesktops: p.dynamicWindowsDesktops, SAMLIdPServiceProviders: p.samlIDPServiceProviders, UserGroups: p.userGroups, Okta: p.okta, @@ -657,6 +665,7 @@ func TestNodeCAFiltering(t *testing.T) { WebSession: p.cache.webSessionCache, WebToken: p.cache.webTokenCache, WindowsDesktops: p.cache.windowsDesktopsCache, + DynamicWindowsDesktops: p.cache.dynamicWindowsDesktopsCache, SAMLIdPServiceProviders: p.samlIDPServiceProviders, UserGroups: p.userGroups, StaticHostUsers: p.staticHostUsers, @@ -838,6 +847,7 @@ func TestCompletenessInit(t *testing.T) { DatabaseServices: p.databaseServices, Databases: p.databases, WindowsDesktops: p.windowsDesktops, + DynamicWindowsDesktops: p.dynamicWindowsDesktops, SAMLIdPServiceProviders: p.samlIDPServiceProviders, UserGroups: p.userGroups, Okta: p.okta, @@ -921,6 +931,7 @@ func TestCompletenessReset(t *testing.T) { DatabaseServices: p.databaseServices, Databases: p.databases, WindowsDesktops: p.windowsDesktops, + DynamicWindowsDesktops: p.dynamicWindowsDesktops, SAMLIdPServiceProviders: p.samlIDPServiceProviders, UserGroups: p.userGroups, Okta: p.okta, @@ -1130,6 +1141,7 @@ func TestListResources_NodesTTLVariant(t *testing.T) { DatabaseServices: p.databaseServices, Databases: p.databases, WindowsDesktops: p.windowsDesktops, + DynamicWindowsDesktops: p.dynamicWindowsDesktops, SAMLIdPServiceProviders: p.samlIDPServiceProviders, UserGroups: p.userGroups, Okta: p.okta, @@ -1224,6 +1236,7 @@ func initStrategy(t *testing.T) { DatabaseServices: p.databaseServices, Databases: p.databases, WindowsDesktops: p.windowsDesktops, + DynamicWindowsDesktops: p.dynamicWindowsDesktops, SAMLIdPServiceProviders: p.samlIDPServiceProviders, UserGroups: p.userGroups, Okta: p.okta, @@ -4083,9 +4096,9 @@ func newAutoUpdateAgentRollout(t *testing.T) *autoupdate.AutoUpdateAgentRollout r, err := update.NewAutoUpdateAgentRollout(&autoupdate.AutoUpdateAgentRolloutSpec{ StartVersion: "1.2.3", TargetVersion: "2.3.4", - Schedule: "regular", - AutoupdateMode: "enabled", - Strategy: "time-based", + Schedule: update.AgentsScheduleImmediate, + AutoupdateMode: update.AgentsUpdateModeEnabled, + Strategy: update.AgentsStrategyTimeBased, }) require.NoError(t, err) return r diff --git a/lib/cache/collections.go b/lib/cache/collections.go index 17c28934a32c3..f7501f1bfdad8 100644 --- a/lib/cache/collections.go +++ b/lib/cache/collections.go @@ -2328,7 +2328,7 @@ func (dynamicWindowsDesktopsExecutor) getAll(ctx context.Context, cache *Cache, var desktops 
[]types.DynamicWindowsDesktop next := "" for { - d, token, err := cache.dynamicWindowsDesktopsCache.ListDynamicWindowsDesktops(ctx, defaults.MaxIterationLimit, next) + d, token, err := cache.Config.DynamicWindowsDesktops.ListDynamicWindowsDesktops(ctx, defaults.MaxIterationLimit, next) if err != nil { return nil, err } diff --git a/lib/client/api.go b/lib/client/api.go index 0a7b35dbd5e51..d087fb02d1e34 100644 --- a/lib/client/api.go +++ b/lib/client/api.go @@ -360,6 +360,9 @@ type Config struct { // authenticators, such as remote hosts or virtual machines. PreferOTP bool + // PreferSSO prefers SSO in favor of other MFA methods. + PreferSSO bool + // CheckVersions will check that client version is compatible // with auth server version when connecting. CheckVersions bool @@ -3043,6 +3046,8 @@ func (tc *TeleportClient) ConnectToCluster(ctx context.Context) (_ *ClusterClien return nil, trace.NewAggregate(err, pclt.Close()) } authClientCfg.MFAPromptConstructor = tc.NewMFAPrompt + authClientCfg.SSOMFACeremonyConstructor = tc.NewSSOMFACeremony + authClient, err := authclient.NewClient(authClientCfg) if err != nil { return nil, trace.NewAggregate(err, pclt.Close()) @@ -5062,9 +5067,10 @@ func (tc *TeleportClient) NewKubernetesServiceClient(ctx context.Context, cluste Credentials: []client.Credentials{ client.LoadTLS(tlsConfig), }, - ALPNConnUpgradeRequired: tc.TLSRoutingConnUpgradeRequired, - InsecureAddressDiscovery: tc.InsecureSkipVerify, - MFAPromptConstructor: tc.NewMFAPrompt, + ALPNConnUpgradeRequired: tc.TLSRoutingConnUpgradeRequired, + InsecureAddressDiscovery: tc.InsecureSkipVerify, + MFAPromptConstructor: tc.NewMFAPrompt, + SSOMFACeremonyConstructor: tc.NewSSOMFACeremony, }) if err != nil { return nil, trace.Wrap(err) diff --git a/lib/client/cluster_client_test.go b/lib/client/cluster_client_test.go index 70a9985853ceb..7a90be3f30d80 100644 --- a/lib/client/cluster_client_test.go +++ b/lib/client/cluster_client_test.go @@ -361,8 +361,9 @@ func TestIssueUserCertsWithMFA(t *testing.T) { tc: &TeleportClient{ localAgent: agent, Config: Config{ - SiteName: "test", - Tracer: tracing.NoopTracer("test"), + WebProxyAddr: "proxy.example.com", + SiteName: "test", + Tracer: tracing.NoopTracer("test"), MFAPromptConstructor: func(cfg *libmfa.PromptConfig) mfa.Prompt { return test.prompt }, diff --git a/lib/client/mfa.go b/lib/client/mfa.go index 90c6f975d3a1c..51f2073b71e45 100644 --- a/lib/client/mfa.go +++ b/lib/client/mfa.go @@ -26,6 +26,7 @@ import ( "github.com/gravitational/teleport/api/client/proto" "github.com/gravitational/teleport/api/mfa" libmfa "github.com/gravitational/teleport/lib/client/mfa" + "github.com/gravitational/teleport/lib/client/sso" ) // NewMFACeremony returns a new MFA ceremony configured for this client. @@ -33,6 +34,7 @@ func (tc *TeleportClient) NewMFACeremony() *mfa.Ceremony { return &mfa.Ceremony{ CreateAuthenticateChallenge: tc.createAuthenticateChallenge, PromptConstructor: tc.NewMFAPrompt, + SSOMFACeremonyConstructor: tc.NewSSOMFACeremony, } } @@ -61,6 +63,7 @@ func (tc *TeleportClient) NewMFAPrompt(opts ...mfa.PromptOpt) mfa.Prompt { PromptConfig: *cfg, Writer: tc.Stderr, PreferOTP: tc.PreferOTP, + PreferSSO: tc.PreferSSO, AllowStdinHijack: tc.AllowStdinHijack, StdinFunc: tc.StdinFunc, }) @@ -79,5 +82,21 @@ func (tc *TeleportClient) newPromptConfig(opts ...mfa.PromptOpt) *libmfa.PromptC cfg.WebauthnLoginFunc = tc.WebauthnLogin cfg.WebauthnSupported = true } + return cfg } + +// NewSSOMFACeremony creates a new SSO MFA ceremony. 
+func (tc *TeleportClient) NewSSOMFACeremony(ctx context.Context) (mfa.SSOMFACeremony, error) { + rdConfig, err := tc.ssoRedirectorConfig(ctx, "" /*connectorDisplayName*/) + if err != nil { + return nil, trace.Wrap(err) + } + + rd, err := sso.NewRedirector(rdConfig) + if err != nil { + return nil, trace.Wrap(err) + } + + return sso.NewCLIMFACeremony(rd), nil +} diff --git a/lib/client/mfa/cli.go b/lib/client/mfa/cli.go index a5e4cc8f26178..400ad7d88e6c6 100644 --- a/lib/client/mfa/cli.go +++ b/lib/client/mfa/cli.go @@ -25,18 +25,29 @@ import ( "log/slog" "os" "runtime" + "strings" "sync" "github.com/gravitational/trace" "github.com/gravitational/teleport/api/client/proto" "github.com/gravitational/teleport/api/constants" + mfav1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/mfa/v1" "github.com/gravitational/teleport/api/utils/prompt" wancli "github.com/gravitational/teleport/lib/auth/webauthncli" wantypes "github.com/gravitational/teleport/lib/auth/webauthntypes" "github.com/gravitational/teleport/lib/auth/webauthnwin" ) +const ( + // cliMFATypeOTP is the CLI display name for OTP. + cliMFATypeOTP = "OTP" + // cliMFATypeWebauthn is the CLI display name for Webauthn. + cliMFATypeWebauthn = "WEBAUTHN" + // cliMFATypeSSO is the CLI display name for SSO. + cliMFATypeSSO = "SSO" +) + // CLIPromptConfig contains CLI prompt config options. type CLIPromptConfig struct { PromptConfig @@ -51,6 +62,9 @@ type CLIPromptConfig struct { // PreferOTP favors OTP challenges, if applicable. // Takes precedence over AuthenticatorAttachment settings. PreferOTP bool + // PreferSSO favors SSO challenges, if applicable. + // Takes precedence over AuthenticatorAttachment settings. + PreferSSO bool // StdinFunc allows tests to override prompt.Stdin(). // If nil prompt.Stdin() is used. StdinFunc func() prompt.StdinReader @@ -109,24 +123,51 @@ func (c *CLIPrompt) Run(ctx context.Context, chal *proto.MFAAuthenticateChalleng promptOTP := chal.TOTP != nil promptWebauthn := chal.WebauthnChallenge != nil + promptSSO := chal.SSOChallenge != nil // No prompt to run, no-op. - if !promptOTP && !promptWebauthn { + if !promptOTP && !promptWebauthn && !promptSSO { return &proto.MFAAuthenticateResponse{}, nil } + var availableMethods []string + if promptWebauthn { + availableMethods = append(availableMethods, cliMFATypeWebauthn) + } + if promptSSO { + availableMethods = append(availableMethods, cliMFATypeSSO) + } + if promptOTP { + availableMethods = append(availableMethods, cliMFATypeOTP) + } + // Check off unsupported methods. if promptWebauthn && !c.cfg.WebauthnSupported { promptWebauthn = false slog.DebugContext(ctx, "hardware device MFA not supported by your platform") - if !promptOTP { - return nil, trace.BadParameter("hardware device MFA not supported by your platform, please register an OTP device") - } + } + + if promptSSO && c.cfg.SSOMFACeremony == nil { + promptSSO = false + slog.DebugContext(ctx, "SSO MFA not supported by this client, this is likely a bug") } // Prefer whatever method is requested by the client. 
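+	// Explicit preferences are honored in the order SSO, OTP, then a specific
+	// Webauthn attachment; the first match below disables the other methods.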
- if c.cfg.PreferOTP && promptOTP { - promptWebauthn = false + var chosenMethods []string + var userSpecifiedMethod bool + switch { + case c.cfg.PreferSSO && promptSSO: + chosenMethods = []string{cliMFATypeSSO} + promptWebauthn, promptOTP = false, false + userSpecifiedMethod = true + case c.cfg.PreferOTP && promptOTP: + chosenMethods = []string{cliMFATypeOTP} + promptWebauthn, promptSSO = false, false + userSpecifiedMethod = true + case c.cfg.AuthenticatorAttachment != wancli.AttachmentAuto: + chosenMethods = []string{cliMFATypeWebauthn} + promptSSO, promptOTP = false, false + userSpecifiedMethod = true } // Use stronger auth methods if hijack is not allowed. @@ -134,10 +175,38 @@ func (c *CLIPrompt) Run(ctx context.Context, chal *proto.MFAAuthenticateChalleng promptOTP = false } - // If a specific webauthn attachment was requested, skip OTP. - // Otherwise, allow dual prompt with OTP. - if promptWebauthn && c.cfg.AuthenticatorAttachment != wancli.AttachmentAuto { + // If we have multiple viable options, prefer Webauthn > SSO > OTP. + switch { + case promptWebauthn: + chosenMethods = []string{cliMFATypeWebauthn} + promptSSO = false + // Allow dual prompt with OTP. + if promptOTP { + chosenMethods = append(chosenMethods, cliMFATypeOTP) + } + case promptSSO: + chosenMethods = []string{cliMFATypeSSO} promptOTP = false + case promptOTP: + chosenMethods = []string{cliMFATypeOTP} + } + + // If there are multiple options and we chose one without it being specifically + // requested by the user, notify the user about it and how to request a specific method. + if len(availableMethods) > len(chosenMethods) && len(chosenMethods) > 0 && !userSpecifiedMethod { + const msg = "" + + "Available MFA methods [%v]. Continuing with %v.\n" + + "If you wish to perform MFA with another method, specify with flag --mfa-mode=.\n\n" + fmt.Fprintf(c.writer(), msg, strings.Join(availableMethods, ", "), strings.Join(chosenMethods, " and ")) + } + + // DELETE IN v18.0 after TOTP session MFA support is removed (codingllama) + // Technically we could remove api/mfa.WithPromptChallengeExtensions along + // with this, as it's likely its only use, although arguably keeping it could + // prove useful. + usageSessionMFA := c.cfg.Extensions.GetScope() == mfav1.ChallengeScope_CHALLENGE_SCOPE_USER_SESSION + if promptOTP && usageSessionMFA { + fmt.Fprint(c.writer(), "\nWARNING: Starting with Teleport 18, OTP will not be accepted for per-session MFA.\n\n") } switch { @@ -147,12 +216,14 @@ func (c *CLIPrompt) Run(ctx context.Context, chal *proto.MFAAuthenticateChalleng case promptWebauthn: resp, err := c.promptWebauthn(ctx, chal, c.getWebauthnPrompt(ctx)) return resp, trace.Wrap(err) + case promptSSO: + resp, err := c.promptSSO(ctx, chal) + return resp, trace.Wrap(err) case promptOTP: resp, err := c.promptOTP(ctx, c.cfg.Quiet) return resp, trace.Wrap(err) default: - // We shouldn't reach this case as we would have hit the no-op case above. 
- return nil, trace.BadParameter("no MFA methods to prompt") + return nil, trace.BadParameter("client does not support any available MFA methods [%v], see debug logs for details", strings.Join(availableMethods, ", ")) } } @@ -305,3 +376,8 @@ func (w *webauthnPromptWithOTP) PromptPIN() (string, error) { return w.LoginPrompt.PromptPIN() } + +func (c *CLIPrompt) promptSSO(ctx context.Context, chal *proto.MFAAuthenticateChallenge) (*proto.MFAAuthenticateResponse, error) { + resp, err := c.cfg.SSOMFACeremony.Run(ctx, chal) + return resp, trace.Wrap(err) +} diff --git a/lib/client/mfa/cli_test.go b/lib/client/mfa/cli_test.go index 54e0fcfd92fd9..b9b69b7c16f2d 100644 --- a/lib/client/mfa/cli_test.go +++ b/lib/client/mfa/cli_test.go @@ -43,6 +43,7 @@ func TestCLIPrompt(t *testing.T) { name string stdin string challenge *proto.MFAAuthenticateChallenge + modifyPromptConfig func(cfg *mfa.CLIPromptConfig) expectErr error expectStdOut string expectResp *proto.MFAAuthenticateResponse @@ -65,7 +66,7 @@ func TestCLIPrompt(t *testing.T) { }, }, }, { - name: "OK totp", + name: "OK otp", expectStdOut: "Enter an OTP code from a device:\n", stdin: "123456", challenge: &proto.MFAAuthenticateChallenge{ @@ -79,11 +80,83 @@ func TestCLIPrompt(t *testing.T) { }, }, }, { - name: "OK webauthn or totp choose webauthn", - expectStdOut: "Tap any security key or enter a code from a OTP device\n", + name: "OK sso", + expectStdOut: "", // sso stdout is handled internally in the SSO ceremony, which is mocked in this test. + challenge: &proto.MFAAuthenticateChallenge{ + SSOChallenge: &proto.SSOChallenge{}, + }, + expectResp: &proto.MFAAuthenticateResponse{ + Response: &proto.MFAAuthenticateResponse_SSO{ + SSO: &proto.SSOResponse{ + RequestId: "request-id", + Token: "mfa-token", + }, + }, + }, + }, { + name: "OK prefer otp when specified", + expectStdOut: "Enter an OTP code from a device:\n", + stdin: "123456", + challenge: &proto.MFAAuthenticateChallenge{ + WebauthnChallenge: &webauthnpb.CredentialAssertion{}, + TOTP: &proto.TOTPChallenge{}, + SSOChallenge: &proto.SSOChallenge{}, + }, + modifyPromptConfig: func(cfg *mfa.CLIPromptConfig) { + cfg.PreferOTP = true + }, + expectResp: &proto.MFAAuthenticateResponse{ + Response: &proto.MFAAuthenticateResponse_TOTP{ + TOTP: &proto.TOTPResponse{ + Code: "123456", + }, + }, + }, + }, { + name: "OK prefer sso when specified", + expectStdOut: "", challenge: &proto.MFAAuthenticateChallenge{ WebauthnChallenge: &webauthnpb.CredentialAssertion{}, TOTP: &proto.TOTPChallenge{}, + SSOChallenge: &proto.SSOChallenge{}, + }, + modifyPromptConfig: func(cfg *mfa.CLIPromptConfig) { + cfg.PreferSSO = true + }, + expectResp: &proto.MFAAuthenticateResponse{ + Response: &proto.MFAAuthenticateResponse_SSO{ + SSO: &proto.SSOResponse{ + RequestId: "request-id", + Token: "mfa-token", + }, + }, + }, + }, { + name: "OK prefer webauthn with authenticator attachment requested", + expectStdOut: "Tap any security key\n", + challenge: &proto.MFAAuthenticateChallenge{ + WebauthnChallenge: &webauthnpb.CredentialAssertion{}, + TOTP: &proto.TOTPChallenge{}, + SSOChallenge: &proto.SSOChallenge{}, + }, + modifyPromptConfig: func(cfg *mfa.CLIPromptConfig) { + cfg.AuthenticatorAttachment = wancli.AttachmentPlatform + }, + expectResp: &proto.MFAAuthenticateResponse{ + Response: &proto.MFAAuthenticateResponse_Webauthn{ + Webauthn: &webauthnpb.CredentialAssertionResponse{}, + }, + }, + }, + { + name: "OK prefer webauthn over sso", + expectStdOut: "" + + "Available MFA methods [WEBAUTHN, SSO]. 
Continuing with WEBAUTHN.\n" + + "If you wish to perform MFA with another method, specify with flag --mfa-mode=.\n\n" + + "Tap any security key\n", + challenge: &proto.MFAAuthenticateChallenge{ + WebauthnChallenge: &webauthnpb.CredentialAssertion{}, + SSOChallenge: &proto.SSOChallenge{}, }, expectResp: &proto.MFAAuthenticateResponse{ Response: &proto.MFAAuthenticateResponse_Webauthn{ @@ -91,12 +164,89 @@ func TestCLIPrompt(t *testing.T) { }, }, }, { - name: "OK webauthn or totp choose totp", - expectStdOut: "Tap any security key or enter a code from a OTP device\n", - stdin: "123456", + name: "OK prefer webauthn+otp over sso", + expectStdOut: "" + + "Available MFA methods [WEBAUTHN, SSO, OTP]. Continuing with WEBAUTHN and OTP.\n" + + "If you wish to perform MFA with another method, specify with flag --mfa-mode=.\n\n" + + "Tap any security key or enter a code from a OTP device\n", challenge: &proto.MFAAuthenticateChallenge{ WebauthnChallenge: &webauthnpb.CredentialAssertion{}, TOTP: &proto.TOTPChallenge{}, + SSOChallenge: &proto.SSOChallenge{}, + }, + modifyPromptConfig: func(cfg *mfa.CLIPromptConfig) { + cfg.AllowStdinHijack = true + }, + expectResp: &proto.MFAAuthenticateResponse{ + Response: &proto.MFAAuthenticateResponse_Webauthn{ + Webauthn: &webauthnpb.CredentialAssertionResponse{}, + }, + }, + }, { + name: "OK prefer sso over otp", + expectStdOut: "" + + "Available MFA methods [SSO, OTP]. Continuing with SSO.\n" + + "If you wish to perform MFA with another method, specify with flag --mfa-mode=.\n\n", + challenge: &proto.MFAAuthenticateChallenge{ + TOTP: &proto.TOTPChallenge{}, + SSOChallenge: &proto.SSOChallenge{}, + }, + expectResp: &proto.MFAAuthenticateResponse{ + Response: &proto.MFAAuthenticateResponse_SSO{ + SSO: &proto.SSOResponse{ + RequestId: "request-id", + Token: "mfa-token", + }, + }, + }, + }, { + name: "OK prefer webauthn over otp when stdin hijack disallowed", + expectStdOut: "" + + "Available MFA methods [WEBAUTHN, OTP]. Continuing with WEBAUTHN.\n" + + "If you wish to perform MFA with another method, specify with flag --mfa-mode=.\n\n" + + "Tap any security key\n", + challenge: &proto.MFAAuthenticateChallenge{ + WebauthnChallenge: &webauthnpb.CredentialAssertion{}, + TOTP: &proto.TOTPChallenge{}, + }, + expectResp: &proto.MFAAuthenticateResponse{ + Response: &proto.MFAAuthenticateResponse_Webauthn{ + Webauthn: &webauthnpb.CredentialAssertionResponse{}, + }, + }, + }, { + name: "OK webauthn or otp with stdin hijack allowed, choose webauthn", + expectStdOut: "" + + "Available MFA methods [WEBAUTHN, SSO, OTP]. Continuing with WEBAUTHN and OTP.\n" + + "If you wish to perform MFA with another method, specify with flag --mfa-mode=.\n\n" + + "Tap any security key or enter a code from a OTP device\n", + challenge: &proto.MFAAuthenticateChallenge{ + WebauthnChallenge: &webauthnpb.CredentialAssertion{}, + TOTP: &proto.TOTPChallenge{}, + SSOChallenge: &proto.SSOChallenge{}, + }, + modifyPromptConfig: func(cfg *mfa.CLIPromptConfig) { + cfg.AllowStdinHijack = true + }, + expectResp: &proto.MFAAuthenticateResponse{ + Response: &proto.MFAAuthenticateResponse_Webauthn{ + Webauthn: &webauthnpb.CredentialAssertionResponse{}, + }, + }, + }, { + name: "OK webauthn or otp with stdin hijack allowed, choose otp", + expectStdOut: "" + + "Available MFA methods [WEBAUTHN, SSO, OTP]. 
Continuing with WEBAUTHN and OTP.\n" + + "If you wish to perform MFA with another method, specify with flag --mfa-mode=.\n\n" + + "Tap any security key or enter a code from a OTP device\n", + stdin: "123456", + challenge: &proto.MFAAuthenticateChallenge{ + WebauthnChallenge: &webauthnpb.CredentialAssertion{}, + TOTP: &proto.TOTPChallenge{}, + SSOChallenge: &proto.SSOChallenge{}, + }, + modifyPromptConfig: func(cfg *mfa.CLIPromptConfig) { + cfg.AllowStdinHijack = true }, expectResp: &proto.MFAAuthenticateResponse{ Response: &proto.MFAAuthenticateResponse_TOTP{ @@ -113,19 +263,29 @@ func TestCLIPrompt(t *testing.T) { }, expectErr: context.DeadlineExceeded, }, { - name: "NOK no totp response", + name: "NOK no sso response", + expectStdOut: "", + challenge: &proto.MFAAuthenticateChallenge{ + SSOChallenge: &proto.SSOChallenge{}, + }, + expectErr: context.DeadlineExceeded, + }, { + name: "NOK no otp response", expectStdOut: "Enter an OTP code from a device:\n", challenge: &proto.MFAAuthenticateChallenge{ TOTP: &proto.TOTPChallenge{}, }, expectErr: context.DeadlineExceeded, }, { - name: "NOK no webauthn or totp response", + name: "NOK no webauthn or otp response", expectStdOut: "Tap any security key or enter a code from a OTP device\n", challenge: &proto.MFAAuthenticateChallenge{ WebauthnChallenge: &webauthnpb.CredentialAssertion{}, TOTP: &proto.TOTPChallenge{}, }, + modifyPromptConfig: func(cfg *mfa.CLIPromptConfig) { + cfg.AllowStdinHijack = true + }, expectErr: context.DeadlineExceeded, }, { @@ -134,6 +294,9 @@ func TestCLIPrompt(t *testing.T) { TOTP: &proto.TOTPChallenge{}, WebauthnChallenge: &webauthnpb.CredentialAssertion{}, }, + modifyPromptConfig: func(cfg *mfa.CLIPromptConfig) { + cfg.AllowStdinHijack = true + }, expectStdOut: `Tap any security key or enter a code from a OTP device Detected security key tap Enter your security key PIN: @@ -224,19 +387,27 @@ Enter your security key PIN: } }, }, + { + name: "NOK webauthn and SSO not supported", + challenge: &proto.MFAAuthenticateChallenge{ + SSOChallenge: &proto.SSOChallenge{}, + WebauthnChallenge: &webauthnpb.CredentialAssertion{}, + }, + modifyPromptConfig: func(cfg *mfa.CLIPromptConfig) { + cfg.WebauthnSupported = false + cfg.SSOMFACeremony = nil + }, + expectErr: trace.BadParameter("client does not support any available MFA methods [WEBAUTHN, SSO], see debug logs for details"), + }, } { t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() - oldStdin := prompt.Stdin() - t.Cleanup(func() { prompt.SetStdin(oldStdin) }) - stdin := prompt.NewFakeReader() if tc.stdin != "" { stdin.AddString(tc.stdin) } - prompt.SetStdin(stdin) cfg := mfa.NewPromptConfig("proxy.example.com") cfg.WebauthnSupported = true @@ -257,16 +428,26 @@ Enter your security key PIN: } } + cfg.SSOMFACeremony = &mockSSOMFACeremony{ + mfaResp: tc.expectResp, + } + buffer := make([]byte, 0, 100) out := bytes.NewBuffer(buffer) - prompt := mfa.NewCLIPromptV2(&mfa.CLIPromptConfig{ - PromptConfig: *cfg, - Writer: out, - AllowStdinHijack: true, - }) - resp, err := prompt.Run(ctx, tc.challenge) + cliPromptConfig := &mfa.CLIPromptConfig{ + PromptConfig: *cfg, + Writer: out, + StdinFunc: func() prompt.StdinReader { + return stdin + }, + } + if tc.modifyPromptConfig != nil { + tc.modifyPromptConfig(cliPromptConfig) + } + + resp, err := mfa.NewCLIPromptV2(cliPromptConfig).Run(ctx, tc.challenge) if tc.expectErr != nil { require.ErrorIs(t, err, tc.expectErr) } else { @@ -278,3 +459,24 @@ Enter your security key PIN: }) } } + 
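+// A minimal usage sketch (hypothetical values, mirroring the config used in
+// these tests): a caller that wants the SSO-first behavior exercised above
+// would build the prompt roughly like so:
+//
+//	cfg := mfa.NewPromptConfig("proxy.example.com")
+//	cfg.SSOMFACeremony = ceremony // any mfa.SSOMFACeremony implementation
+//	prompt := mfa.NewCLIPromptV2(&mfa.CLIPromptConfig{
+//		PromptConfig: *cfg,
+//		Writer:       os.Stderr,
+//		PreferSSO:    true, // takes precedence, like --mfa-mode=sso
+//	})
+//	resp, err := prompt.Run(ctx, challenge)
+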
+type mockSSOMFACeremony struct {
+	mfaResp *proto.MFAAuthenticateResponse
+}
+
+func (m *mockSSOMFACeremony) GetClientCallbackURL() string {
+	return ""
+}
+
+// Run the SSO MFA ceremony.
+func (m *mockSSOMFACeremony) Run(ctx context.Context, chal *proto.MFAAuthenticateChallenge) (*proto.MFAAuthenticateResponse, error) {
+	if m.mfaResp == nil {
+		return nil, context.DeadlineExceeded
+	}
+	if m.mfaResp.GetSSO() == nil {
+		return nil, trace.BadParameter("expected an SSO response but got %T", m.mfaResp.Response)
+	}
+	return m.mfaResp, nil
+}
+
+func (m *mockSSOMFACeremony) Close() {}
diff --git a/lib/client/sso/ceremony.go b/lib/client/sso/ceremony.go
index cb5b57c5a3183..8a2a64debfe49 100644
--- a/lib/client/sso/ceremony.go
+++ b/lib/client/sso/ceremony.go
@@ -23,6 +23,7 @@ import (
 
 	"github.com/gravitational/trace"
 
+	"github.com/gravitational/teleport/api/client/proto"
 	"github.com/gravitational/teleport/lib/auth/authclient"
 )
 
@@ -61,3 +62,66 @@ func NewCLICeremony(rd *Redirector, init CeremonyInit) *Ceremony {
 		GetCallbackResponse: rd.WaitForResponse,
 	}
 }
+
+// MFACeremony is a customizable SSO MFA ceremony.
+type MFACeremony struct {
+	clientCallbackURL   string
+	close               func()
+	HandleRedirect      func(ctx context.Context, redirectURL string) error
+	GetCallbackMFAToken func(ctx context.Context) (string, error)
+}
+
+// GetClientCallbackURL returns the client callback URL.
+func (m *MFACeremony) GetClientCallbackURL() string {
+	return m.clientCallbackURL
+}
+
+// Run the SSO MFA ceremony.
+func (m *MFACeremony) Run(ctx context.Context, chal *proto.MFAAuthenticateChallenge) (*proto.MFAAuthenticateResponse, error) {
+	if err := m.HandleRedirect(ctx, chal.SSOChallenge.RedirectUrl); err != nil {
+		return nil, trace.Wrap(err)
+	}
+
+	mfaToken, err := m.GetCallbackMFAToken(ctx)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+
+	return &proto.MFAAuthenticateResponse{
+		Response: &proto.MFAAuthenticateResponse_SSO{
+			SSO: &proto.SSOResponse{
+				RequestId: chal.SSOChallenge.RequestId,
+				Token:     mfaToken,
+			},
+		},
+	}, nil
+}
+
+// Close closes resources associated with the SSO MFA ceremony.
+func (m *MFACeremony) Close() {
+	if m.close != nil {
+		m.close()
+	}
+}
+
+// NewCLIMFACeremony creates a new CLI SSO ceremony from the given redirector.
+// The returned MFACeremony takes ownership of the Redirector.
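+// Callers should call Close on the returned ceremony when done so the
+// redirector's local callback listener is released.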
+func NewCLIMFACeremony(rd *Redirector) *MFACeremony { + return &MFACeremony{ + clientCallbackURL: rd.ClientCallbackURL, + close: rd.Close, + HandleRedirect: rd.OpenRedirect, + GetCallbackMFAToken: func(ctx context.Context) (string, error) { + loginResp, err := rd.WaitForResponse(ctx) + if err != nil { + return "", trace.Wrap(err) + } + + if loginResp.MFAToken == "" { + return "", trace.BadParameter("login response for SSO MFA flow missing MFA token") + } + + return loginResp.MFAToken, nil + }, + } +} diff --git a/lib/client/sso/ceremony_test.go b/lib/client/sso/ceremony_test.go index 0851ac1b4daf2..4ea904697c8aa 100644 --- a/lib/client/sso/ceremony_test.go +++ b/lib/client/sso/ceremony_test.go @@ -26,22 +26,25 @@ import ( "net/http/httptest" "regexp" "testing" - "text/template" "github.com/gravitational/trace" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/gravitational/teleport" + "github.com/gravitational/teleport/api/client/proto" "github.com/gravitational/teleport/lib/client/sso" "github.com/gravitational/teleport/lib/web" ) func TestCLICeremony(t *testing.T) { + ctx := context.Background() + mockProxy := newMockProxy(t) username := "alice" // Capture stderr. - stderr := bytes.NewBuffer([]byte{}) + stderr := &bytes.Buffer{} // Create a basic redirector. rd, err := sso.NewRedirector(sso.RedirectorConfig{ @@ -69,7 +72,66 @@ func TestCLICeremony(t *testing.T) { return mockIdPServer.URL, nil }) - template.New("Failed to open a browser window for login: %v\n") + // Modify handle redirect to also browse to the clickable URL printed to stderr. + baseHandleRedirect := ceremony.HandleRedirect + ceremony.HandleRedirect = func(ctx context.Context, redirectURL string) error { + if err := baseHandleRedirect(ctx, redirectURL); err != nil { + return trace.Wrap(err) + } + + // Read the clickable url from stderr and navigate to it + // using a simplified regexp for http://127.0.0.1:/ + const clickableURLPattern = `http://127.0.0.1:\d+/[0-9A-Fa-f-]+` + clickableURL := regexp.MustCompile(clickableURLPattern).FindString(stderr.String()) + resp, err := http.Get(clickableURL) + require.NoError(t, err) + defer resp.Body.Close() + + // User should be redirected to success screen. + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, sso.LoginSuccessRedirectURL, string(body)) + return nil + } + + loginResp, err := ceremony.Run(ctx) + require.NoError(t, err) + require.Equal(t, username, loginResp.Username) +} + +func TestCLICeremony_MFA(t *testing.T) { + const token = "sso-mfa-token" + const requestID = "soo-mfa-request-id" + + ctx := context.Background() + mockProxy := newMockProxy(t) + + // Capture stderr. + stderr := bytes.NewBuffer([]byte{}) + + // Create a basic redirector. + rd, err := sso.NewRedirector(sso.RedirectorConfig{ + ProxyAddr: mockProxy.URL, + Browser: teleport.BrowserNone, + Stderr: stderr, + }) + require.NoError(t, err) + + // Construct a fake mfa response with the redirector's client callback URL. + successResponseURL, err := web.ConstructSSHResponse(web.AuthParams{ + ClientRedirectURL: rd.ClientCallbackURL, + MFAToken: token, + }) + require.NoError(t, err) + + // Open a mock IdP server which will handle a redirect and result in the expected IdP session payload. 
+ mockIdPServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, successResponseURL.String(), http.StatusPermanentRedirect) + })) + t.Cleanup(mockIdPServer.Close) + + ceremony := sso.NewCLIMFACeremony(rd) + t.Cleanup(ceremony.Close) // Modify handle redirect to also browse to the clickable URL printed to stderr. baseHandleRedirect := ceremony.HandleRedirect @@ -94,7 +156,14 @@ func TestCLICeremony(t *testing.T) { return nil } - loginResp, err := ceremony.Run(context.Background()) + mfaResponse, err := ceremony.Run(ctx, &proto.MFAAuthenticateChallenge{ + SSOChallenge: &proto.SSOChallenge{ + RedirectUrl: mockIdPServer.URL, + RequestId: requestID, + }, + }) require.NoError(t, err) - require.Equal(t, username, loginResp.Username) + require.NotNil(t, mfaResponse.GetSSO()) + assert.Equal(t, token, mfaResponse.GetSSO().Token) + assert.Equal(t, requestID, mfaResponse.GetSSO().RequestId) } diff --git a/lib/config/configuration.go b/lib/config/configuration.go index 5ef37af409f81..87c41b8986b6f 100644 --- a/lib/config/configuration.go +++ b/lib/config/configuration.go @@ -297,6 +297,9 @@ type IntegrationConfAzureOIDC struct { // When this is true, the integration script will produce // a cache file necessary for TAG synchronization. AccessGraphEnabled bool + + // SkipOIDCConfiguration is a flag indicating that OIDC configuration should be skipped. + SkipOIDCConfiguration bool } // IntegrationConfDeployServiceIAM contains the arguments of diff --git a/lib/integrations/azureoidc/enterprise_app.go b/lib/integrations/azureoidc/enterprise_app.go index e159470d0bb39..e7de09225ec58 100644 --- a/lib/integrations/azureoidc/enterprise_app.go +++ b/lib/integrations/azureoidc/enterprise_app.go @@ -52,7 +52,7 @@ var appRoles = []string{ // - Provides Teleport with OIDC authentication to Azure // - Is given the permissions to access certain Microsoft Graph API endpoints for this tenant. // - Provides SSO to the Teleport cluster via SAML. -func SetupEnterpriseApp(ctx context.Context, proxyPublicAddr string, authConnectorName string) (string, string, error) { +func SetupEnterpriseApp(ctx context.Context, proxyPublicAddr string, authConnectorName string, skipOIDCSetup bool) (string, string, error) { var appID, tenantID string tenantID, err := getTenantID() @@ -120,8 +120,12 @@ func SetupEnterpriseApp(ctx context.Context, proxyPublicAddr string, authConnect } } - if err := createFederatedAuthCredential(ctx, graphClient, *app.ID, proxyPublicAddr); err != nil { - return appID, tenantID, trace.Wrap(err, "failed to create an OIDC federated auth credential") + // Skip OIDC setup if requested. + // This is useful for clusters that can't use OIDC because they are not reachable from the public internet. 
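+	// Such clusters can still rely on the SAML SSO setup performed below.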
+ if !skipOIDCSetup { + if err := createFederatedAuthCredential(ctx, graphClient, *app.ID, proxyPublicAddr); err != nil { + return appID, tenantID, trace.Wrap(err, "failed to create an OIDC federated auth credential") + } } acsURL, err := url.Parse(proxyPublicAddr) diff --git a/lib/integrations/azureoidc/provision_sso.go b/lib/integrations/azureoidc/provision_sso.go index 07d4366040752..9bb17aa5771dd 100644 --- a/lib/integrations/azureoidc/provision_sso.go +++ b/lib/integrations/azureoidc/provision_sso.go @@ -48,6 +48,9 @@ func setupSSO(ctx context.Context, graphClient *msgraph.Client, appObjectID stri webApp := &msgraph.WebApplication{} webApp.RedirectURIs = &uris app.Web = webApp + securityGroups := new(string) + *securityGroups = "SecurityGroup" + app.GroupMembershipClaims = securityGroups err = graphClient.UpdateApplication(ctx, appObjectID, app) diff --git a/lib/kube/proxy/forwarder.go b/lib/kube/proxy/forwarder.go index b66665d16b927..729ab913139ff 100644 --- a/lib/kube/proxy/forwarder.go +++ b/lib/kube/proxy/forwarder.go @@ -2306,7 +2306,7 @@ func (s *clusterSession) close() { } } -func (s *clusterSession) monitorConn(conn net.Conn, err error) (net.Conn, error) { +func (s *clusterSession) monitorConn(conn net.Conn, err error, hostID string) (net.Conn, error) { if err != nil { return nil, trace.Wrap(err) } @@ -2321,10 +2321,18 @@ func (s *clusterSession) monitorConn(conn net.Conn, err error) (net.Conn, error) s.connMonitorCancel(err) return nil, trace.Wrap(err) } - + lockTargets := s.LockTargets() + // when the target is not a kubernetes_service instance, we don't need to lock it. + // the target could be a remote cluster or a local Kubernetes API server. In both cases, + // hostID is empty. + if hostID != "" { + lockTargets = append(lockTargets, types.LockTarget{ + ServerID: hostID, + }) + } err = srv.StartMonitor(srv.MonitorConfig{ LockWatcher: s.parent.cfg.LockWatcher, - LockTargets: s.LockTargets(), + LockTargets: lockTargets, DisconnectExpiredCert: s.disconnectExpiredCert, ClientIdleTimeout: s.clientIdleTimeout, Clock: s.parent.cfg.Clock, @@ -2356,12 +2364,16 @@ func (s *clusterSession) getServerMetadata() apievents.ServerMetadata { } func (s *clusterSession) Dial(network, addr string) (net.Conn, error) { - return s.monitorConn(s.dial(s.requestContext, network, addr)) + var hostID string + conn, err := s.dial(s.requestContext, network, addr, withHostIDCollection(&hostID)) + return s.monitorConn(conn, err, hostID) } func (s *clusterSession) DialWithContext(opts ...contextDialerOption) func(ctx context.Context, network, addr string) (net.Conn, error) { return func(ctx context.Context, network, addr string) (net.Conn, error) { - return s.monitorConn(s.dial(ctx, network, addr, opts...)) + var hostID string + conn, err := s.dial(ctx, network, addr, append(opts, withHostIDCollection(&hostID))...) 
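+			// hostID is filled in by withHostIDCollection when the dial lands on a
+			// kubernetes_service instance; monitorConn then adds that instance as a
+			// lock target so locking it terminates this session.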
+ return s.monitorConn(conn, err, hostID) } } diff --git a/lib/kube/proxy/roundtrip.go b/lib/kube/proxy/roundtrip.go index b6935c3ce6cfc..3630f3e898dd7 100644 --- a/lib/kube/proxy/roundtrip.go +++ b/lib/kube/proxy/roundtrip.go @@ -113,7 +113,6 @@ func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) { if err != nil { return nil, err } - if err := req.Write(conn); err != nil { conn.Close() return nil, err diff --git a/lib/kube/proxy/transport.go b/lib/kube/proxy/transport.go index 7ee2eabad282b..1e8e0067ed9e4 100644 --- a/lib/kube/proxy/transport.go +++ b/lib/kube/proxy/transport.go @@ -347,6 +347,7 @@ func (f *Forwarder) localClusterDialer(kubeClusterName string, opts ...contextDi ProxyIDs: s.GetProxyIDs(), }) if err == nil { + opt.collect(s.GetHostID()) return conn, nil } errs = append(errs, trace.Wrap(err)) @@ -423,13 +424,21 @@ func (f *Forwarder) getContextDialerFunc(s *clusterSession, opts ...contextDiale // contextDialerOptions is a set of options that can be used to filter // the hosts that the dialer connects to. type contextDialerOptions struct { - hostID string + hostIDFilter string + collectHostID *string } // matches returns true if the host matches the hostID of the dialer options or // if the dialer hostID is empty. func (c *contextDialerOptions) matches(hostID string) bool { - return c.hostID == "" || c.hostID == hostID + return c.hostIDFilter == "" || c.hostIDFilter == hostID +} + +// collect sets the hostID that the dialer connected to if collectHostID is not nil. +func (c *contextDialerOptions) collect(hostID string) { + if c.collectHostID != nil { + *c.collectHostID = hostID + } } // contextDialerOption is a functional option for the contextDialerOptions. @@ -442,6 +451,14 @@ type contextDialerOption func(*contextDialerOptions) // error. func withTargetHostID(hostID string) contextDialerOption { return func(o *contextDialerOptions) { - o.hostID = hostID + o.hostIDFilter = hostID + } +} + +// withHostIDCollection is a functional option that sets the hostID of the dialer +// to the provided pointer. 
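+// The pointer is only written when a dial succeeds, via collect(), letting the
+// caller learn which host served the connection.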
+func withHostIDCollection(hostID *string) contextDialerOption { + return func(o *contextDialerOptions) { + o.collectHostID = hostID } } diff --git a/lib/msgraph/models.go b/lib/msgraph/models.go index f867ecbb634c5..829d55a040464 100644 --- a/lib/msgraph/models.go +++ b/lib/msgraph/models.go @@ -18,6 +18,7 @@ package msgraph import ( "encoding/json" + "slices" "github.com/gravitational/trace" ) @@ -34,6 +35,12 @@ type DirectoryObject struct { type Group struct { DirectoryObject + GroupTypes []string `json:"groupTypes,omitempty"` +} + +func (g *Group) IsOffice365Group() bool { + const office365Group = "Unified" + return slices.Contains(g.GroupTypes, office365Group) } func (g *Group) isGroupMember() {} @@ -53,9 +60,10 @@ func (u *User) GetID() *string { return u.ID } type Application struct { DirectoryObject - AppID *string `json:"appId,omitempty"` - IdentifierURIs *[]string `json:"identifierUris,omitempty"` - Web *WebApplication `json:"web,omitempty"` + AppID *string `json:"appId,omitempty"` + IdentifierURIs *[]string `json:"identifierUris,omitempty"` + Web *WebApplication `json:"web,omitempty"` + GroupMembershipClaims *string `json:"groupMembershipClaims,omitempty"` } type WebApplication struct { diff --git a/lib/service/service.go b/lib/service/service.go index 215fdb0035f00..cef1270059802 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -2529,6 +2529,7 @@ func (process *TeleportProcess) newAccessCacheForServices(cfg accesspoint.Config cfg.WebSession = services.Identity.WebSessions() cfg.WebToken = services.Identity.WebTokens() cfg.WindowsDesktops = services.WindowsDesktops + cfg.DynamicWindowsDesktops = services.DynamicWindowsDesktops cfg.AutoUpdateService = services.AutoUpdateService cfg.ProvisioningStates = services.ProvisioningStates cfg.IdentityCenter = services.IdentityCenter @@ -2576,6 +2577,7 @@ func (process *TeleportProcess) newAccessCacheForClient(cfg accesspoint.Config, cfg.WebSession = client.WebSessions() cfg.WebToken = client.WebTokens() cfg.WindowsDesktops = client + cfg.DynamicWindowsDesktops = client.DynamicDesktopClient() cfg.AutoUpdateService = client return accesspoint.NewCache(cfg) diff --git a/lib/services/local/dynamic_desktops.go b/lib/services/local/dynamic_desktops.go index b4b482d600de7..6254db2bd2a34 100644 --- a/lib/services/local/dynamic_desktops.go +++ b/lib/services/local/dynamic_desktops.go @@ -73,7 +73,11 @@ func (s *DynamicWindowsDesktopService) CreateDynamicWindowsDesktop(ctx context.C // UpdateDynamicWindowsDesktop updates a dynamic Windows desktop resource. 
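+// The update is conditional on the resource revision and fails if the stored
+// resource was modified since it was read.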
func (s *DynamicWindowsDesktopService) UpdateDynamicWindowsDesktop(ctx context.Context, desktop types.DynamicWindowsDesktop) (types.DynamicWindowsDesktop, error) {
-	d, err := s.service.UpdateResource(ctx, desktop)
+	// ConditionalUpdateResource can return an invalid-revision error instead of not-found, so check that the resource exists first.
+	if _, err := s.service.GetResource(ctx, desktop.GetName()); trace.IsNotFound(err) {
+		return nil, err
+	}
+	d, err := s.service.ConditionalUpdateResource(ctx, desktop)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
diff --git a/lib/services/local/dynamic_desktops_test.go b/lib/services/local/dynamic_desktops_test.go
index 75ed040080648..9e20ed30e7eb2 100644
--- a/lib/services/local/dynamic_desktops_test.go
+++ b/lib/services/local/dynamic_desktops_test.go
@@ -182,8 +182,15 @@ func TestDynamicWindowsService_UpdateDynamicDesktop(t *testing.T) {
 		require.Error(t, err)
 		require.True(t, trace.IsNotFound(err))
 	})
+	t.Run("revision doesn't match", func(t *testing.T) {
+		want := newDynamicDesktop(t, "example1")
+		_, err := service.CreateDynamicWindowsDesktop(ctx, want.Copy())
+		require.NoError(t, err)
+		_, err = service.UpdateDynamicWindowsDesktop(ctx, want)
+		require.Error(t, err)
+	})
 	t.Run("ok", func(t *testing.T) {
-		want := newDynamicDesktop(t, "example")
+		want := newDynamicDesktop(t, "example2")
 		created, err := service.CreateDynamicWindowsDesktop(ctx, want.Copy())
 		require.NoError(t, err)
 		updated, err := service.UpdateDynamicWindowsDesktop(ctx, created.Copy())
diff --git a/lib/services/local/integrations.go b/lib/services/local/integrations.go
index b22baecaa0be6..3b9842ee79690 100644
--- a/lib/services/local/integrations.go
+++ b/lib/services/local/integrations.go
@@ -127,22 +127,44 @@ func (s *IntegrationsService) DeleteIntegration(ctx context.Context, name string
 		return trace.Wrap(err)
 	}
 
-	conditionalActions, err := notReferencedByEAS(ctx, s.backend, name)
+	deleteConditions, err := integrationDeletionConditions(ctx, s.backend, name)
 	if err != nil {
 		return trace.Wrap(err)
 	}
-	conditionalActions = append(conditionalActions, backend.ConditionalAction{
+	deleteConditions = append(deleteConditions, backend.ConditionalAction{
 		Key:       s.svc.MakeKey(backend.NewKey(name)),
 		Condition: backend.Exists(),
 		Action:    backend.Delete(),
 	})
-	_, err = s.backend.AtomicWrite(ctx, conditionalActions)
+	_, err = s.backend.AtomicWrite(ctx, deleteConditions)
 	return trace.Wrap(err)
 }
 
-// notReferencedByEAS returns a slice of ConditionalActions to use with a backend.AtomicWrite to ensure that
+// integrationDeletionConditions returns a BadParameter error if the integration is directly referenced
+// by another Teleport service. If no direct reference is found, it returns backend.ConditionalActions
+// capturing the current reference state; these should be included in the AtomicWrite so that the
+// reference state cannot change until the integration is completely deleted.
+// A service may contribute zero or more ConditionalActions.
+func integrationDeletionConditions(ctx context.Context, bk backend.Backend, name string) ([]backend.ConditionalAction, error) {
+	var deleteConditionalActions []backend.ConditionalAction
+	easDeleteConditions, err := integrationReferencedByEAS(ctx, bk, name)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+	deleteConditionalActions = append(deleteConditionalActions, easDeleteConditions...)
+
+	awsIcDeleteCondition, err := integrationReferencedByAWSICPlugin(ctx, bk, name)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+	deleteConditionalActions = append(deleteConditionalActions, awsIcDeleteCondition...)
+
+	return deleteConditionalActions, nil
+}
+
+// integrationReferencedByEAS returns a slice of ConditionalActions to use with a backend.AtomicWrite to ensure that
 // integration [name] is not referenced by any EAS (External Audit Storage) integration.
-func notReferencedByEAS(ctx context.Context, bk backend.Backend, name string) ([]backend.ConditionalAction, error) {
+func integrationReferencedByEAS(ctx context.Context, bk backend.Backend, name string) ([]backend.ConditionalAction, error) {
 	var conditionalActions []backend.ConditionalAction
 	for _, key := range []backend.Key{draftExternalAuditStorageBackendKey, clusterExternalAuditStorageBackendKey} {
 		condition := backend.ConditionalAction{
@@ -173,6 +195,42 @@
 	return conditionalActions, nil
 }
 
+// integrationReferencedByAWSICPlugin returns a BadParameter error if the integration is referenced
+// by an existing AWS Identity Center plugin. If the plugin exists but does not reference this
+// integration, a conditional action pinned to the plugin's revision is returned, ensuring the
+// plugin does not change while the AWS OIDC integration is being deleted.
+func integrationReferencedByAWSICPlugin(ctx context.Context, bk backend.Backend, name string) ([]backend.ConditionalAction, error) {
+	var conditionalActions []backend.ConditionalAction
+	pluginService := NewPluginsService(bk)
+	plugins, err := pluginService.GetPlugins(ctx, false)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+
+	for _, p := range plugins {
+		pluginV1, ok := p.(*types.PluginV1)
+		if !ok {
+			continue
+		}
+
+		if pluginV1.GetType() == types.PluginType(types.PluginTypeAWSIdentityCenter) {
+			switch pluginV1.Spec.GetAwsIc().IntegrationName {
+			case name:
+				return nil, trace.BadParameter("cannot delete AWS OIDC integration currently referenced by AWS Identity Center integration %q", pluginV1.GetName())
+			default:
+				conditionalActions = append(conditionalActions, backend.ConditionalAction{
+					Key:       backend.NewKey(pluginsPrefix, pluginV1.GetName()),
+					Action:    backend.Nop(),
+					Condition: backend.Revision(pluginV1.GetRevision()),
+				})
+				return conditionalActions, nil
+			}
+		}
+	}
+
+	return conditionalActions, nil
+}
+
 // DeleteAllIntegrations removes all Integration resources. This should only be used in a cache.
 func (s *IntegrationsService) DeleteAllIntegrations(ctx context.Context) error {
 	if !s.cacheMode {
diff --git a/lib/services/local/trust.go b/lib/services/local/trust.go
index 72d2979dba675..2a2e454cdcb19 100644
--- a/lib/services/local/trust.go
+++ b/lib/services/local/trust.go
@@ -20,7 +20,6 @@ package local
 
 import (
 	"context"
-	"encoding/json"
 	"errors"
 	"log/slog"
 	"slices"
@@ -67,44 +66,164 @@ func (s *CA) CreateCertAuthority(ctx context.Context, ca types.CertAuthority) er
 
 // CreateCertAuthorities creates multiple cert authorities atomically.
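+// All writes go through a single AtomicWrite: if any CA already exists, the
+// whole operation fails with an AlreadyExists error and nothing is written.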
func (s *CA) CreateCertAuthorities(ctx context.Context, cas ...types.CertAuthority) (revision string, err error) { - var condacts []backend.ConditionalAction - var clusterNames []string - for _, ca := range cas { - if !slices.Contains(clusterNames, ca.GetName()) { - clusterNames = append(clusterNames, ca.GetName()) + condacts, err := createCertAuthoritiesCondActs(cas, true /* active */) + if err != nil { + return "", trace.Wrap(err) + } + + rev, err := s.AtomicWrite(ctx, condacts) + if err != nil { + if errors.Is(err, backend.ErrConditionFailed) { + var clusterNames []string + for _, ca := range cas { + if slices.Contains(clusterNames, ca.GetClusterName()) { + continue + } + clusterNames = append(clusterNames, ca.GetClusterName()) + } + return "", trace.AlreadyExists("one or more CAs from cluster(s) %q already exist", strings.Join(clusterNames, ",")) } + return "", trace.Wrap(err) + } + + return rev, nil +} + +// createCertAuthoritiesCondActs sets up conditional actions for creating a set of CAs. +func createCertAuthoritiesCondActs(cas []types.CertAuthority, active bool) ([]backend.ConditionalAction, error) { + condacts := make([]backend.ConditionalAction, 0, len(cas)*2) + for _, ca := range cas { if err := services.ValidateCertAuthority(ca); err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } item, err := caToItem(backend.Key{}, ca) if err != nil { - return "", trace.Wrap(err) + return nil, trace.Wrap(err) } - condacts = append(condacts, []backend.ConditionalAction{ - { - Key: activeCAKey(ca.GetID()), - Condition: backend.NotExists(), - Action: backend.Put(item), - }, - { - Key: inactiveCAKey(ca.GetID()), - Condition: backend.Whatever(), - Action: backend.Delete(), - }, - }...) + if active { + // for an enabled tc, we perform a conditional create for the active CA key + // and an unconditional delete for the inactive CA key since the active range + // is given priority over the inactive range. + condacts = append(condacts, []backend.ConditionalAction{ + { + Key: activeCAKey(ca.GetID()), + Condition: backend.NotExists(), + Action: backend.Put(item), + }, + { + Key: inactiveCAKey(ca.GetID()), + Condition: backend.Whatever(), + Action: backend.Delete(), + }, + }...) + } else { + // for a disabled tc, we perform a conditional create for the inactive CA key + // and assert the non-existence of the active CA key. + condacts = append(condacts, []backend.ConditionalAction{ + { + Key: inactiveCAKey(ca.GetID()), + Condition: backend.NotExists(), + Action: backend.Put(item), + }, + { + Key: activeCAKey(ca.GetID()), + Condition: backend.NotExists(), + Action: backend.Nop(), + }, + }...) + } } - rev, err := s.AtomicWrite(ctx, condacts) - if err != nil { - if errors.Is(err, backend.ErrConditionFailed) { - return "", trace.AlreadyExists("one or more CAs from cluster(s) %q already exist", strings.Join(clusterNames, ",")) + return condacts, nil +} + +func updateCertAuthoritiesCondActs(cas []types.CertAuthority, active bool, currentlyActive bool) ([]backend.ConditionalAction, error) { + condacts := make([]backend.ConditionalAction, 0, len(cas)*2) + for _, ca := range cas { + if err := services.ValidateCertAuthority(ca); err != nil { + return nil, trace.Wrap(err) + } + + item, err := caToItem(backend.Key{}, ca) + if err != nil { + return nil, trace.Wrap(err) + } + + if active { + if currentlyActive { + // we are updating an active CA without changing its active status. 
we want to perform
+			// a conditional update on the active CA key and an unconditional delete on the inactive
+			// CA key in order to correctly model active range priority.
+			condacts = append(condacts, []backend.ConditionalAction{
+				{
+					Key:       activeCAKey(ca.GetID()),
+					Condition: backend.Revision(item.Revision),
+					Action:    backend.Put(item),
+				},
+				{
+					Key:       inactiveCAKey(ca.GetID()),
+					Condition: backend.Whatever(),
+					Action:    backend.Delete(),
+				},
+			}...)
+		} else {
+			// we are updating a currently inactive CA to the active state. we want to perform
+			// a create on the active CA key and a revision-conditional delete on the inactive CA key
+			// to effect a "move-and-update" that respects the active range priority.
+			condacts = append(condacts, []backend.ConditionalAction{
+				{
+					Key:       activeCAKey(ca.GetID()),
+					Condition: backend.NotExists(),
+					Action:    backend.Put(item),
+				},
+				{
+					Key:       inactiveCAKey(ca.GetID()),
+					Condition: backend.Revision(item.Revision),
+					Action:    backend.Delete(),
+				},
+			}...)
+		}
+	} else {
+		if currentlyActive {
+			// we are updating an active CA to the inactive state. we want to perform a conditional
+			// delete on the active CA key and an unconditional put on the inactive CA key to
+			// effect a "move-and-update" that respects the active range priority.
+			condacts = append(condacts, []backend.ConditionalAction{
+				{
+					Key:       activeCAKey(ca.GetID()),
+					Condition: backend.Revision(item.Revision),
+					Action:    backend.Delete(),
+				},
+				{
+					Key:       inactiveCAKey(ca.GetID()),
+					Condition: backend.Whatever(),
+					Action:    backend.Put(item),
+				},
+			}...)
+
+		} else {
+			// we are updating an inactive CA without changing its active status. we want to perform
+			// a conditional update on the inactive CA key and assert the non-existence of the active
+			// CA key.
+			condacts = append(condacts, []backend.ConditionalAction{
+				{
+					Key:       inactiveCAKey(ca.GetID()),
+					Condition: backend.Revision(item.Revision),
+					Action:    backend.Put(item),
+				},
+				{
+					Key:       activeCAKey(ca.GetID()),
+					Condition: backend.NotExists(),
+					Action:    backend.Nop(),
+				},
+			}...)
+		}
+	}
-		return "", trace.Wrap(err)
 	}
 
-	return rev, nil
+	return condacts, nil
 }
 
 // UpsertCertAuthority updates or inserts a new certificate authority
@@ -198,10 +317,15 @@ func (s *CA) DeleteCertAuthority(ctx context.Context, id types.CertAuthID) error
 
 // DeleteCertAuthorities deletes multiple cert authorities atomically.
 func (s *CA) DeleteCertAuthorities(ctx context.Context, ids ...types.CertAuthID) error {
+	_, err := s.AtomicWrite(ctx, s.deleteCertAuthoritiesCondActs(ids))
+	return trace.Wrap(err)
+}
+
+func (s *CA) deleteCertAuthoritiesCondActs(ids []types.CertAuthID) []backend.ConditionalAction {
 	var condacts []backend.ConditionalAction
 	for _, id := range ids {
 		if err := id.Check(); err != nil {
-			return trace.Wrap(err)
+			continue
 		}
 		for _, key := range []backend.Key{activeCAKey(id), inactiveCAKey(id)} {
 			condacts = append(condacts, backend.ConditionalAction{
@@ -211,9 +335,7 @@ func (s *CA) DeleteCertAuthorities(ctx context.Context, ids ...types.CertAuthID)
 			})
 		}
 	}
-
-	_, err := s.AtomicWrite(ctx, condacts)
-	return trace.Wrap(err)
+	return condacts
 }
 
 // ActivateCertAuthority moves a CertAuthority from the deactivated list to
@@ -325,10 +447,26 @@ func (s *CA) DeactivateCertAuthorities(ctx context.Context, ids ...types.CertAut
 
 // GetCertAuthority returns certificate authority by given id.
Parameter loadSigningKeys // controls if signing keys are loaded func (s *CA) GetCertAuthority(ctx context.Context, id types.CertAuthID, loadSigningKeys bool) (types.CertAuthority, error) { + return s.getCertAuthority(ctx, id, loadSigningKeys, true /* active */) +} + +// GetInactiveCertAuthority returns inactive certificate authority by given id. Parameter loadSigningKeys +// controls if signing keys are loaded. +func (s *CA) GetInactiveCertAuthority(ctx context.Context, id types.CertAuthID, loadSigningKeys bool) (types.CertAuthority, error) { + return s.getCertAuthority(ctx, id, loadSigningKeys, false /* inactive */) +} + +func (s *CA) getCertAuthority(ctx context.Context, id types.CertAuthID, loadSigningKeys bool, active bool) (types.CertAuthority, error) { if err := id.Check(); err != nil { return nil, trace.Wrap(err) } - item, err := s.Get(ctx, activeCAKey(id)) + + key := activeCAKey(id) + if !active { + key = inactiveCAKey(id) + } + + item, err := s.Get(ctx, key) if err != nil { return nil, trace.Wrap(err) } @@ -425,25 +563,135 @@ func (s *CA) UpdateUserCARoleMap(ctx context.Context, name string, roleMap types return nil } +// CreateTrustedCluster atomically creates a new trusted cluster along with associated resources. +func (s *CA) CreateTrustedCluster(ctx context.Context, tc types.TrustedCluster, cas []types.CertAuthority) (revision string, err error) { + if err := services.ValidateTrustedCluster(tc); err != nil { + return "", trace.Wrap(err) + } + + item, err := trustedClusterToItem(tc) + if err != nil { + return "", trace.Wrap(err) + } + + condacts := []backend.ConditionalAction{ + { + Key: item.Key, + Condition: backend.NotExists(), + Action: backend.Put(item), + }, + // also assert that no remote cluster exists by this name, as + // we currently do not allow for a trusted cluster and remote + // cluster to share a name (CAs end up stored at the same location). + { + Key: remoteClusterKey(tc.GetName()), + Condition: backend.NotExists(), + Action: backend.Nop(), + }, + } + + // perform some initial trusted-cluster related validation. common ca validation is handled later + // on by the createCertAuthoritiesCondActs helper. + for _, ca := range cas { + if tc.GetName() != ca.GetClusterName() { + return "", trace.BadParameter("trusted cluster name %q does not match CA cluster name %q", tc.GetName(), ca.GetClusterName()) + } + } + + ccas, err := createCertAuthoritiesCondActs(cas, tc.GetEnabled()) + if err != nil { + return "", trace.Wrap(err) + } + + condacts = append(condacts, ccas...) + + rev, err := s.AtomicWrite(ctx, condacts) + if err != nil { + if errors.Is(err, backend.ErrConditionFailed) { + if _, err := s.GetRemoteCluster(ctx, tc.GetName()); err == nil { + return "", trace.BadParameter("cannot create trusted cluster with same name as remote cluster %q, bidirectional trust is not supported", tc.GetName()) + } + + return "", trace.AlreadyExists("trusted cluster %q and/or one or more of its cert authorities already exists", tc.GetName()) + } + return "", trace.Wrap(err) + } + + return rev, nil +} + +// UpdateTrustedCluster atomically updates a trusted cluster along with associated resources. +func (s *CA) UpdateTrustedCluster(ctx context.Context, tc types.TrustedCluster, cas []types.CertAuthority) (revision string, err error) { + if err := services.ValidateTrustedCluster(tc); err != nil { + return "", trace.Wrap(err) + } + + // fetch the current state. we'll need this later on to correctly construct our CA condacts, and + // it doesn't hurt to reject mismatched revisions early. 
+ extant, err := s.GetTrustedCluster(ctx, tc.GetName()) + if err != nil { + return "", trace.Wrap(err) + } + + if tc.GetRevision() != extant.GetRevision() { + return "", trace.CompareFailed("trusted cluster %q has been modified, please retry", tc.GetName()) + } + + item, err := trustedClusterToItem(tc) + if err != nil { + return "", trace.Wrap(err) + } + + condacts := []backend.ConditionalAction{ + { + Key: item.Key, + Condition: backend.Revision(item.Revision), + Action: backend.Put(item), + }, + } + + // perform some initial trusted-cluster related validation. common ca validation is handled later + // on by the createCertAuthoritiesCondActs helper. + for _, ca := range cas { + if tc.GetName() != ca.GetClusterName() { + return "", trace.BadParameter("trusted cluster name %q does not match CA cluster name %q", tc.GetName(), ca.GetClusterName()) + } + } + + ccas, err := updateCertAuthoritiesCondActs(cas, tc.GetEnabled(), extant.GetEnabled()) + if err != nil { + return "", trace.Wrap(err) + } + + condacts = append(condacts, ccas...) + + rev, err := s.AtomicWrite(ctx, condacts) + if err != nil { + if errors.Is(err, backend.ErrConditionFailed) { + return "", trace.CompareFailed("trusted cluster %q and/or one or more of its cert authorities have been modified, please retry", tc.GetName()) + } + return "", trace.Wrap(err) + } + + return rev, nil +} + // UpsertTrustedCluster creates or updates a TrustedCluster in the backend. func (s *CA) UpsertTrustedCluster(ctx context.Context, trustedCluster types.TrustedCluster) (types.TrustedCluster, error) { if err := services.ValidateTrustedCluster(trustedCluster); err != nil { return nil, trace.Wrap(err) } - rev := trustedCluster.GetRevision() - value, err := services.MarshalTrustedCluster(trustedCluster) + + item, err := trustedClusterToItem(trustedCluster) if err != nil { return nil, trace.Wrap(err) } - _, err = s.Put(ctx, backend.Item{ - Key: backend.NewKey(trustedClustersPrefix, trustedCluster.GetName()), - Value: value, - Expires: trustedCluster.Expiry(), - Revision: rev, - }) + + _, err = s.Put(ctx, item) if err != nil { return nil, trace.Wrap(err) } + return trustedCluster, nil } @@ -482,16 +730,44 @@ func (s *CA) GetTrustedClusters(ctx context.Context) ([]types.TrustedCluster, er // DeleteTrustedCluster removes a TrustedCluster from the backend by name. func (s *CA) DeleteTrustedCluster(ctx context.Context, name string) error { + return s.DeleteTrustedClusterInternal(ctx, name, nil /* no cert authorities */) +} + +// DeleteTrustedClusterInternal removes a trusted cluster and associated resources atomically. +func (s *CA) DeleteTrustedClusterInternal(ctx context.Context, name string, caIDs []types.CertAuthID) error { if name == "" { return trace.BadParameter("missing trusted cluster name") } - err := s.Delete(ctx, backend.NewKey(trustedClustersPrefix, name)) - if err != nil { - if trace.IsNotFound(err) { + + for _, id := range caIDs { + if err := id.Check(); err != nil { + return trace.Wrap(err) + } + + if id.DomainName != name { + return trace.BadParameter("ca %q does not belong to trusted cluster %q", id.DomainName, name) + } + } + + condacts := []backend.ConditionalAction{ + { + Key: backend.NewKey(trustedClustersPrefix, name), + Condition: backend.Exists(), + Action: backend.Delete(), + }, + } + + condacts = append(condacts, s.deleteCertAuthoritiesCondActs(caIDs)...) 
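+	// condacts now deletes the trusted cluster resource plus the active and
+	// inactive keys of every supplied CA in one atomic operation.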
+
+	if _, err := s.AtomicWrite(ctx, condacts); err != nil {
+		if errors.Is(err, backend.ErrConditionFailed) {
+			return trace.NotFound("trusted cluster %q is not found", name)
+		}
+
+		return trace.Wrap(err)
 	}
-	return trace.Wrap(err)
+
+	return nil
 }
 
 // UpsertTunnelConnection updates or creates tunnel connection
@@ -608,25 +884,71 @@ func (s *CA) DeleteAllTunnelConnections() error {
 	return trace.Wrap(err)
 }
 
-// CreateRemoteCluster creates remote cluster
-func (s *CA) CreateRemoteCluster(
-	ctx context.Context, rc types.RemoteCluster,
-) (types.RemoteCluster, error) {
-	value, err := json.Marshal(rc)
+// CreateRemoteCluster creates a remote cluster
+func (s *CA) CreateRemoteCluster(ctx context.Context, rc types.RemoteCluster) (types.RemoteCluster, error) {
+	rev, err := s.CreateRemoteClusterInternal(ctx, rc, nil)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
-	item := backend.Item{
-		Key:     backend.NewKey(remoteClustersPrefix, rc.GetName()),
-		Value:   value,
-		Expires: rc.Expiry(),
+
+	rc.SetRevision(rev)
+	return rc, nil
+}
+
+// CreateRemoteClusterInternal atomically creates a new remote cluster along with associated resources.
+func (s *CA) CreateRemoteClusterInternal(ctx context.Context, rc types.RemoteCluster, cas []types.CertAuthority) (revision string, err error) {
+	if err := services.CheckAndSetDefaults(rc); err != nil {
+		return "", trace.Wrap(err)
 	}
-	lease, err := s.Create(ctx, item)
+
+	item, err := remoteClusterToItem(rc)
 	if err != nil {
-		return nil, trace.Wrap(err)
+		return "", trace.Wrap(err)
 	}
-	rc.SetRevision(lease.Revision)
-	return rc, nil
+
+	condacts := []backend.ConditionalAction{
+		{
+			Key:       item.Key,
+			Condition: backend.NotExists(),
+			Action:    backend.Put(item),
+		},
+		// also assert that no trusted cluster exists by this name, as
+		// we currently do not allow for a trusted cluster and remote
+		// cluster to share a name (CAs end up stored at the same location).
+		{
+			Key:       trustedClusterKey(rc.GetName()),
+			Condition: backend.NotExists(),
+			Action:    backend.Nop(),
+		},
+	}
+
+	// perform some initial remote-cluster related validation. common ca validation is handled later
+	// on by the createCertAuthoritiesCondActs helper.
+	for _, ca := range cas {
+		if rc.GetName() != ca.GetClusterName() {
+			return "", trace.BadParameter("remote cluster name %q does not match CA cluster name %q", rc.GetName(), ca.GetClusterName())
+		}
+	}
+
+	ccas, err := createCertAuthoritiesCondActs(cas, true /* remote cluster cas always considered active */)
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	condacts = append(condacts, ccas...)
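+	// condacts now covers the remote cluster item, the trusted-cluster name
+	// collision guard, and the conditional creation of each CA.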
+ + rev, err := s.AtomicWrite(ctx, condacts) + if err != nil { + if errors.Is(err, backend.ErrConditionFailed) { + if _, err := s.GetTrustedCluster(ctx, rc.GetName()); err == nil { + return "", trace.BadParameter("cannot create remote cluster with same name as trusted cluster %q, bidirectional trust is not supported", rc.GetName()) + } + return "", trace.AlreadyExists("remote cluster %q and/or one or more of its cert authorities already exists", rc.GetName()) + } + return "", trace.Wrap(err) + } + + return rev, nil } // UpdateRemoteCluster updates selected remote cluster fields: expiry and labels @@ -652,17 +974,12 @@ func (s *CA) UpdateRemoteCluster(ctx context.Context, rc types.RemoteCluster) (t existing.SetConnectionStatus(rc.GetConnectionStatus()) existing.SetMetadata(rc.GetMetadata()) - updateValue, err := services.MarshalRemoteCluster(existing) + item, err := remoteClusterToItem(existing) if err != nil { return nil, trace.Wrap(err) } - lease, err := s.ConditionalUpdate(ctx, backend.Item{ - Key: backend.NewKey(remoteClustersPrefix, existing.GetName()), - Value: updateValue, - Expires: existing.Expiry(), - Revision: existing.GetRevision(), - }) + lease, err := s.ConditionalUpdate(ctx, item) if err != nil { if trace.IsCompareFailed(err) { // Retry! @@ -707,17 +1024,12 @@ func (s *CA) PatchRemoteCluster( return nil, trace.BadParameter("metadata.revision: cannot be patched") } - updatedValue, err := services.MarshalRemoteCluster(updated) + item, err := remoteClusterToItem(updated) if err != nil { return nil, trace.Wrap(err) } - lease, err := s.ConditionalUpdate(ctx, backend.Item{ - Key: backend.NewKey(remoteClustersPrefix, name), - Value: updatedValue, - Expires: updated.Expiry(), - Revision: updated.GetRevision(), - }) + lease, err := s.ConditionalUpdate(ctx, item) if err != nil { if trace.IsCompareFailed(err) { // Retry! @@ -822,13 +1134,44 @@ func (s *CA) GetRemoteCluster( } // DeleteRemoteCluster deletes remote cluster by name -func (s *CA) DeleteRemoteCluster( - ctx context.Context, clusterName string, -) error { - if clusterName == "" { +func (s *CA) DeleteRemoteCluster(ctx context.Context, clusterName string) error { + return s.DeleteRemoteClusterInternal(ctx, clusterName, nil /* no cert authorities */) +} + +// DeleteRemoteClusterInternal atomically deletes a remote cluster along with associated resources. +func (s *CA) DeleteRemoteClusterInternal(ctx context.Context, name string, ids []types.CertAuthID) error { + if name == "" { return trace.BadParameter("missing parameter cluster name") } - return s.Delete(ctx, backend.NewKey(remoteClustersPrefix, clusterName)) + + for _, id := range ids { + if err := id.Check(); err != nil { + return trace.Wrap(err) + } + + if id.DomainName != name { + return trace.BadParameter("ca %q does not belong to remote cluster %q", id.DomainName, name) + } + } + + condacts := []backend.ConditionalAction{ + { + Key: remoteClusterKey(name), + Condition: backend.Exists(), + Action: backend.Delete(), + }, + } + + condacts = append(condacts, s.deleteCertAuthoritiesCondActs(ids)...) 
+ + if _, err := s.AtomicWrite(ctx, condacts); err != nil { + if errors.Is(err, backend.ErrConditionFailed) { + return trace.NotFound("remote cluster %q is not found", name) + } + return trace.Wrap(err) + } + + return nil } // DeleteAllRemoteClusters deletes all remote clusters @@ -853,6 +1196,42 @@ func caToItem(key backend.Key, ca types.CertAuthority) (backend.Item, error) { }, nil } +func trustedClusterToItem(tc types.TrustedCluster) (backend.Item, error) { + value, err := services.MarshalTrustedCluster(tc) + if err != nil { + return backend.Item{}, trace.Wrap(err) + } + + return backend.Item{ + Key: trustedClusterKey(tc.GetName()), + Value: value, + Expires: tc.Expiry(), + Revision: tc.GetRevision(), + }, nil +} + +func trustedClusterKey(name string) backend.Key { + return backend.NewKey(trustedClustersPrefix, name) +} + +func remoteClusterToItem(rc types.RemoteCluster) (backend.Item, error) { + value, err := services.MarshalRemoteCluster(rc) + if err != nil { + return backend.Item{}, trace.Wrap(err) + } + + return backend.Item{ + Key: remoteClusterKey(rc.GetName()), + Value: value, + Expires: rc.Expiry(), + Revision: rc.GetRevision(), + }, nil +} + +func remoteClusterKey(name string) backend.Key { + return backend.NewKey(remoteClustersPrefix, name) +} + // activeCAKey builds the active key variant for the supplied ca id. func activeCAKey(id types.CertAuthID) backend.Key { return backend.NewKey(authoritiesPrefix, string(id.Type), id.DomainName) diff --git a/lib/services/local/trust_test.go b/lib/services/local/trust_test.go index 34a85171d4887..3188c546e6c16 100644 --- a/lib/services/local/trust_test.go +++ b/lib/services/local/trust_test.go @@ -20,6 +20,7 @@ package local import ( "context" + "crypto/x509/pkix" "fmt" "testing" "time" @@ -32,11 +33,205 @@ import ( "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/auth/testauthority" "github.com/gravitational/teleport/lib/backend" "github.com/gravitational/teleport/lib/backend/lite" "github.com/gravitational/teleport/lib/backend/memory" + "github.com/gravitational/teleport/lib/tlsca" ) +func TestUpdateCertAuthorityCondActs(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // setup closure creates our initial state and returns its components + setup := func(active bool) (types.TrustedCluster, types.CertAuthority, *CA) { + bk, err := memory.New(memory.Config{}) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, bk.Close()) }) + service := NewCAService(bk) + + tc, err := types.NewTrustedCluster("tc", types.TrustedClusterSpecV2{ + Enabled: active, + Roles: []string{"rrr"}, + Token: "xxx", + ProxyAddress: "xxx", + ReverseTunnelAddress: "xxx", + }) + require.NoError(t, err) + + ca := newCertAuthority(t, types.HostCA, "tc") + revision, err := service.CreateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) + require.NoError(t, err) + tc.SetRevision(revision) + ca.SetRevision(revision) + return tc, ca, service + } + + // putCA is a helper for injecting a CA into the backend, bypassing atomic condition protections + putCA := func(ctx context.Context, service *CA, ca types.CertAuthority, active bool) { + key := activeCAKey(ca.GetID()) + if !active { + key = inactiveCAKey(ca.GetID()) + } + item, err := caToItem(key, ca) + require.NoError(t, err) + _, err = service.Put(ctx, item) + require.NoError(t, err) + } + + // delCA is a helper for deleting a CA from the backend, bypassing atomic condition 
protections + delCA := func(ctx context.Context, service *CA, ca types.CertAuthority, active bool) { + key := activeCAKey(ca.GetID()) + if !active { + key = inactiveCAKey(ca.GetID()) + } + require.NoError(t, service.Delete(ctx, key)) + } + + // -- update active in place --- + tc, ca, service := setup(true /* active */) + + // verify basic update works + tc.SetRoles([]string{"rrr", "zzz"}) + revision, err := service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) + require.NoError(t, err) + tc.SetRevision(revision) + ca.SetRevision(revision) + + gotTC, err := service.GetTrustedCluster(ctx, tc.GetName()) + require.NoError(t, err) + require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision"))) + _, err = service.GetCertAuthority(ctx, ca.GetID(), true) + require.NoError(t, err) + _, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true) + require.True(t, trace.IsNotFound(err), "err=%v", err) + + // verify that an inactive CA doesn't prevent update + putCA(ctx, service, ca, false /* inactive */) + tc.SetRoles([]string{"rrr", "zzz", "aaa"}) + revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) + require.NoError(t, err) + tc.SetRevision(revision) + ca.SetRevision(revision) + + gotTC, err = service.GetTrustedCluster(ctx, tc.GetName()) + require.NoError(t, err) + require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision"))) + _, err = service.GetCertAuthority(ctx, ca.GetID(), true) + require.NoError(t, err) + _, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true) + require.True(t, trace.IsNotFound(err), "err=%v", err) + + // verify that concurrent update of the active CA causes update to fail + putCA(ctx, service, ca, true /* active */) + tc.SetRoles([]string{"rrr", "zzz", "aaa", "bbb"}) + _, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) + require.True(t, trace.IsCompareFailed(err), "err=%v", err) + + // --- update inactive in place --- + tc, ca, service = setup(false /* inactive */) + + // verify basic update works + tc.SetRoles([]string{"rrr", "zzz"}) + revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) + require.NoError(t, err) + tc.SetRevision(revision) + ca.SetRevision(revision) + + gotTC, err = service.GetTrustedCluster(ctx, tc.GetName()) + require.NoError(t, err) + require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision"))) + _, err = service.GetCertAuthority(ctx, ca.GetID(), true) + require.True(t, trace.IsNotFound(err), "err=%v", err) + _, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true) + require.NoError(t, err) + + // verify that an active CA prevents update + putCA(ctx, service, ca, true /* active */) + tc.SetRoles([]string{"rrr", "zzz", "aaa"}) + _, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) + require.True(t, trace.IsCompareFailed(err), "err=%v", err) + delCA(ctx, service, ca, true /* active */) + + // verify that concurrent update of the inactive CA causes update to fail + putCA(ctx, service, ca, false /* inactive */) + tc.SetRoles([]string{"rrr", "zzz", "aaa", "bbb"}) + _, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) + require.True(t, trace.IsCompareFailed(err), "err=%v", err) + + // --- activate/deactivate --- + tc, ca, service = setup(false /* inactive */) + + // verify that activating works + tc.SetEnabled(true) + revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) + require.NoError(t, err) + 
tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err = service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+
+	// verify that deactivating works
+	tc.SetEnabled(false)
+	revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err = service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+
+	// verify that an active CA conflicts with activation
+	putCA(ctx, service, ca, true /* active */)
+	tc.SetEnabled(true)
+	_, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.True(t, trace.IsCompareFailed(err), "err=%v", err)
+	delCA(ctx, service, ca, true /* active */)
+
+	// activation should work after deleting conflicting CA
+	revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err = service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+
+	// verify that deactivation works even if there is an inactive CA present
+	putCA(ctx, service, ca, false /* inactive */)
+	tc.SetEnabled(false)
+	revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err = service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+}
+
 func TestRemoteClusterCRUD(t *testing.T) {
 	t.Parallel()
 	ctx := context.Background()
@@ -67,22 +262,38 @@ func TestRemoteClusterCRUD(t *testing.T) {
 	src.SetConnectionStatus(teleport.RemoteClusterStatusOnline)
 	src.SetLastHeartbeat(clock.Now().Add(-time.Hour))
 
-	// create remote clusters
-	gotRC, err := trustService.CreateRemoteCluster(ctx, rc)
+	// set up fake CAs for the remote clusters
+	ca := newCertAuthority(t, types.HostCA, "foo")
+	sca := newCertAuthority(t, types.HostCA, "bar")
+
+	// create remote cluster
+	revision, err := trustService.CreateRemoteClusterInternal(ctx, rc, []types.CertAuthority{ca})
 	require.NoError(t, err)
-	require.Empty(t, cmp.Diff(rc, gotRC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
-	gotSRC, err := trustService.CreateRemoteCluster(ctx, src)
+	rc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	_, err =
trustService.CreateRemoteClusterInternal(ctx, rc, []types.CertAuthority{ca}) + require.True(t, trace.IsAlreadyExists(err), "err=%v", err) + + revision, err = trustService.CreateRemoteClusterInternal(ctx, src, []types.CertAuthority{sca}) require.NoError(t, err) - require.Empty(t, cmp.Diff(src, gotSRC, cmpopts.IgnoreFields(types.Metadata{}, "Revision"))) + src.SetRevision(revision) + sca.SetRevision(revision) // get remote cluster make sure it's correct - gotRC, err = trustService.GetRemoteCluster(ctx, "foo") + gotRC, err := trustService.GetRemoteCluster(ctx, "foo") require.NoError(t, err) require.Equal(t, "foo", gotRC.GetName()) require.Equal(t, teleport.RemoteClusterStatusOffline, gotRC.GetConnectionStatus()) require.Equal(t, clock.Now().Nanosecond(), gotRC.GetLastHeartbeat().Nanosecond()) require.Equal(t, originalLabels, gotRC.GetMetadata().Labels) + // get remote cluster CA make sure it's correct + gotCA, err := trustService.GetCertAuthority(ctx, ca.GetID(), true) + require.NoError(t, err) + + require.Empty(t, cmp.Diff(ca, gotCA, cmpopts.IgnoreFields(types.Metadata{}, "Revision"))) + rc = gotRC updatedLabels := map[string]string{ "e": "f", @@ -99,10 +310,9 @@ func TestRemoteClusterCRUD(t *testing.T) { require.NoError(t, err) require.Empty(t, cmp.Diff(rc, gotRC, cmpopts.IgnoreFields(types.Metadata{}, "Revision"))) - src = gotSRC src.SetConnectionStatus(teleport.RemoteClusterStatusOffline) src.SetLastHeartbeat(clock.Now()) - gotSRC, err = trustService.UpdateRemoteCluster(ctx, src) + gotSRC, err := trustService.UpdateRemoteCluster(ctx, src) require.NoError(t, err) require.Empty(t, cmp.Diff(src, gotSRC, cmpopts.IgnoreFields(types.Metadata{}, "Revision"))) @@ -126,13 +336,26 @@ func TestRemoteClusterCRUD(t *testing.T) { require.Len(t, allRC, 2) // delete cluster - err = trustService.DeleteRemoteCluster(ctx, "foo") + err = trustService.DeleteRemoteClusterInternal(ctx, "foo", []types.CertAuthID{ca.GetID()}) require.NoError(t, err) // make sure it's really gone - err = trustService.DeleteRemoteCluster(ctx, "foo") - require.Error(t, err) - require.ErrorIs(t, err, trace.NotFound(`key "/remoteClusters/foo" is not found`)) + _, err = trustService.GetRemoteCluster(ctx, "foo") + require.True(t, trace.IsNotFound(err)) + _, err = trustService.GetCertAuthority(ctx, ca.GetID(), true) + require.True(t, trace.IsNotFound(err)) + + // make sure we can't create trusted clusters with the same name as an extant remote cluster + tc, err := types.NewTrustedCluster("bar", types.TrustedClusterSpecV2{ + Enabled: true, + Roles: []string{"bar", "baz"}, + Token: "qux", + ProxyAddress: "quux", + ReverseTunnelAddress: "quuz", + }) + require.NoError(t, err) + _, err = trustService.CreateTrustedCluster(ctx, tc, nil) + require.True(t, trace.IsBadParameter(err), "err=%v", err) } func TestPresenceService_PatchRemoteCluster(t *testing.T) { @@ -290,10 +513,13 @@ func TestTrustedClusterCRUD(t *testing.T) { }) require.NoError(t, err) + ca := newCertAuthority(t, types.HostCA, "foo") + sca := newCertAuthority(t, types.HostCA, "bar") + // create trusted clusters - _, err = trustService.UpsertTrustedCluster(ctx, tc) + _, err = trustService.CreateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) require.NoError(t, err) - _, err = trustService.UpsertTrustedCluster(ctx, stc) + _, err = trustService.CreateTrustedCluster(ctx, stc, []types.CertAuthority{sca}) require.NoError(t, err) // get trusted cluster make sure it's correct @@ -306,17 +532,87 @@ func TestTrustedClusterCRUD(t *testing.T) { require.Equal(t, "quux", 
gotTC.GetProxyAddress()) require.Equal(t, "quuz", gotTC.GetReverseTunnelAddress()) + // get trusted cluster CA make sure it's correct + gotCA, err := trustService.GetCertAuthority(ctx, ca.GetID(), true) + require.NoError(t, err) + require.Empty(t, cmp.Diff(ca, gotCA, cmpopts.IgnoreFields(types.Metadata{}, "Revision"))) + // get all clusters allTC, err := trustService.GetTrustedClusters(ctx) require.NoError(t, err) require.Len(t, allTC, 2) + // verify that enabling/disabling correctly shows/hides CAs + tc.SetEnabled(false) + tc.SetRevision(gotTC.GetRevision()) + ca.SetRevision(gotCA.GetRevision()) + revision, err := trustService.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) + require.NoError(t, err) + _, err = trustService.GetCertAuthority(ctx, ca.GetID(), true) + require.True(t, trace.IsNotFound(err), "err=%v", err) + + _, err = trustService.GetInactiveCertAuthority(ctx, ca.GetID(), true) + require.NoError(t, err) + + tc.SetEnabled(true) + tc.SetRevision(revision) + ca.SetRevision(revision) + _, err = trustService.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca}) + require.NoError(t, err) + + _, err = trustService.GetCertAuthority(ctx, ca.GetID(), true) + require.NoError(t, err) + _, err = trustService.GetInactiveCertAuthority(ctx, ca.GetID(), true) + require.True(t, trace.IsNotFound(err), "err=%v", err) + // delete cluster - err = trustService.DeleteTrustedCluster(ctx, "foo") + err = trustService.DeleteTrustedClusterInternal(ctx, "foo", []types.CertAuthID{ca.GetID()}) require.NoError(t, err) // make sure it's really gone _, err = trustService.GetTrustedCluster(ctx, "foo") - require.Error(t, err) - require.ErrorIs(t, err, trace.NotFound(`key "/trustedclusters/foo" is not found`)) + require.True(t, trace.IsNotFound(err), "err=%v", err) + _, err = trustService.GetCertAuthority(ctx, ca.GetID(), true) + require.True(t, trace.IsNotFound(err), "err=%v", err) + + // make sure we can't create remote clusters with the same name as an extant trusted cluster + rc, err := types.NewRemoteCluster("bar") + require.NoError(t, err) + _, err = trustService.CreateRemoteCluster(ctx, rc) + require.True(t, trace.IsBadParameter(err), "err=%v", err) +} + +func newCertAuthority(t *testing.T, caType types.CertAuthType, domain string) types.CertAuthority { + t.Helper() + + ta := testauthority.New() + priv, pub, err := ta.GenerateKeyPair() + require.NoError(t, err) + + key, cert, err := tlsca.GenerateSelfSignedCA(pkix.Name{CommonName: domain}, nil, time.Hour) + require.NoError(t, err) + + ca, err := types.NewCertAuthority(types.CertAuthoritySpecV2{ + Type: caType, + ClusterName: domain, + ActiveKeys: types.CAKeySet{ + SSH: []*types.SSHKeyPair{{ + PrivateKey: priv, + PrivateKeyType: types.PrivateKeyType_RAW, + PublicKey: pub, + }}, + TLS: []*types.TLSKeyPair{{ + Cert: cert, + Key: key, + }}, + JWT: []*types.JWTKeyPair{{ + PublicKey: pub, + PrivateKey: priv, + PrivateKeyType: types.PrivateKeyType_RAW, + }}, + }, + }) + require.NoError(t, err) + + return ca } diff --git a/lib/services/presets.go b/lib/services/presets.go index 068bccba13e04..1d6a4bfc5c6c1 100644 --- a/lib/services/presets.go +++ b/lib/services/presets.go @@ -182,6 +182,7 @@ func NewPresetEditorRole() types.Role { types.NewRule(types.KindNotification, RW()), types.NewRule(types.KindStaticHostUser, RW()), types.NewRule(types.KindUserTask, RW()), + types.NewRule(types.KindIdentityCenterAccount, RW()), }, }, }, diff --git a/lib/services/reconciler.go b/lib/services/reconciler.go index fb7ee569524b0..17b136a056152 100644 --- 
a/lib/services/reconciler.go +++ b/lib/services/reconciler.go @@ -23,7 +23,6 @@ import ( "log/slog" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" @@ -54,9 +53,6 @@ type GenericReconcilerConfig[K comparable, T any] struct { OnUpdate func(ctx context.Context, new, old T) error // OnDelete is called when an existing resource is deleted. OnDelete func(context.Context, T) error - // Log is the reconciler's logger. - // TODO(tross) remove this when all components in e have been updated - Log logrus.FieldLogger // Logger emits log messages. Logger *slog.Logger } diff --git a/lib/services/role.go b/lib/services/role.go index 083b277c9b81f..97eb20c03fdb3 100644 --- a/lib/services/role.go +++ b/lib/services/role.go @@ -78,6 +78,8 @@ var DefaultImplicitRules = []types.Rule{ types.NewRule(types.KindUsageEvent, []string{types.VerbCreate}), types.NewRule(types.KindVnetConfig, RO()), types.NewRule(types.KindSPIFFEFederation, RO()), + types.NewRule(types.KindSAMLIdPServiceProvider, RO()), + types.NewRule(types.KindIdentityCenterAccount, RO()), } // DefaultCertAuthorityRules provides access the minimal set of resources diff --git a/lib/services/role_test.go b/lib/services/role_test.go index 9beb05cfe0d2b..ae4b1c9167817 100644 --- a/lib/services/role_test.go +++ b/lib/services/role_test.go @@ -2441,6 +2441,78 @@ func TestCheckRuleAccess(t *testing.T) { } } +func TestDefaultImplicitRules(t *testing.T) { + type check struct { + hasAccess bool + verb string + namespace string + rule string + context testContext + } + testCases := []struct { + name string + role types.Role + checks []check + }{ + { + name: "KindIdentityCenterAccount with NewPresetAccessRole", + role: NewPresetAccessRole(), + checks: []check{ + {rule: types.KindIdentityCenterAccount, verb: types.VerbRead, namespace: apidefaults.Namespace, hasAccess: true}, + {rule: types.KindIdentityCenterAccount, verb: types.VerbList, namespace: apidefaults.Namespace, hasAccess: true}, + {rule: types.KindIdentityCenterAccount, verb: types.VerbCreate, namespace: apidefaults.Namespace, hasAccess: false}, + {rule: types.KindIdentityCenterAccount, verb: types.VerbUpdate, namespace: apidefaults.Namespace, hasAccess: false}, + {rule: types.KindIdentityCenterAccount, verb: types.VerbDelete, namespace: apidefaults.Namespace, hasAccess: false}, + }, + }, + { + name: "KindIdentityCenterAccount with a custom role that does not explicitly target read and list verbs for KindIdentityCenterAccount", + role: newRole(func(r *types.RoleV6) {}), + checks: []check{ + {rule: types.KindIdentityCenterAccount, verb: types.VerbRead, namespace: apidefaults.Namespace, hasAccess: true}, + {rule: types.KindIdentityCenterAccount, verb: types.VerbList, namespace: apidefaults.Namespace, hasAccess: true}, + {rule: types.KindIdentityCenterAccount, verb: types.VerbCreate, namespace: apidefaults.Namespace, hasAccess: false}, + {rule: types.KindIdentityCenterAccount, verb: types.VerbUpdate, namespace: apidefaults.Namespace, hasAccess: false}, + {rule: types.KindIdentityCenterAccount, verb: types.VerbDelete, namespace: apidefaults.Namespace, hasAccess: false}, + }, + }, + { + name: "KindSAMLIdPServiceProvider with NewPresetAccessRole", + role: NewPresetAccessRole(), + checks: []check{ + {rule: types.KindSAMLIdPServiceProvider, verb: types.VerbRead, namespace: apidefaults.Namespace, hasAccess: true}, + {rule: types.KindSAMLIdPServiceProvider, verb: types.VerbList, namespace: apidefaults.Namespace, 
hasAccess: true},
+				{rule: types.KindSAMLIdPServiceProvider, verb: types.VerbCreate, namespace: apidefaults.Namespace, hasAccess: false},
+				{rule: types.KindSAMLIdPServiceProvider, verb: types.VerbUpdate, namespace: apidefaults.Namespace, hasAccess: false},
+				{rule: types.KindSAMLIdPServiceProvider, verb: types.VerbDelete, namespace: apidefaults.Namespace, hasAccess: false},
+			},
+		},
+		{
+			name: "KindSAMLIdPServiceProvider with a custom role that does not explicitly target read and list verbs for KindSAMLIdPServiceProvider",
+			role: newRole(func(r *types.RoleV6) {}),
+			checks: []check{
+				{rule: types.KindSAMLIdPServiceProvider, verb: types.VerbRead, namespace: apidefaults.Namespace, hasAccess: true},
+				{rule: types.KindSAMLIdPServiceProvider, verb: types.VerbList, namespace: apidefaults.Namespace, hasAccess: true},
+				{rule: types.KindSAMLIdPServiceProvider, verb: types.VerbCreate, namespace: apidefaults.Namespace, hasAccess: false},
+				{rule: types.KindSAMLIdPServiceProvider, verb: types.VerbUpdate, namespace: apidefaults.Namespace, hasAccess: false},
+				{rule: types.KindSAMLIdPServiceProvider, verb: types.VerbDelete, namespace: apidefaults.Namespace, hasAccess: false},
+			},
+		},
+	}
+	for _, tc := range testCases {
+		roleSet := NewRoleSet(tc.role)
+		for _, check := range tc.checks {
+			result := roleSet.CheckAccessToRule(&check.context, check.namespace, check.rule, check.verb)
+			if check.hasAccess {
+				require.NoError(t, result)
+			} else {
+				require.True(t, trace.IsAccessDenied(result))
+			}
+		}
+	}
+}
+
 func TestMFAVerificationInterval(t *testing.T) {
 	testCases := []struct {
 		name string
diff --git a/lib/services/trust.go b/lib/services/trust.go
index c7cbfe0229bce..63775ae5b52bb 100644
--- a/lib/services/trust.go
+++ b/lib/services/trust.go
@@ -83,6 +83,26 @@ type Trust interface {
 // auth server for some local operations.
 type TrustInternal interface {
 	Trust
+
+	// CreateTrustedCluster atomically creates a new trusted cluster along with associated resources.
+	CreateTrustedCluster(context.Context, types.TrustedCluster, []types.CertAuthority) (revision string, err error)
+
+	// UpdateTrustedCluster atomically updates a trusted cluster along with associated resources.
+	UpdateTrustedCluster(context.Context, types.TrustedCluster, []types.CertAuthority) (revision string, err error)
+
+	// DeleteTrustedClusterInternal atomically deletes a trusted cluster along with associated resources.
+	DeleteTrustedClusterInternal(context.Context, string, []types.CertAuthID) error
+
+	// CreateRemoteClusterInternal atomically creates a new remote cluster along with associated resources.
+	CreateRemoteClusterInternal(context.Context, types.RemoteCluster, []types.CertAuthority) (revision string, err error)
+
+	// DeleteRemoteClusterInternal atomically deletes a remote cluster along with associated resources.
+	DeleteRemoteClusterInternal(context.Context, string, []types.CertAuthID) error
+
+	// GetInactiveCertAuthority returns inactive certificate authority by given id. Parameter loadSigningKeys
+	// controls if signing keys are loaded.
+	GetInactiveCertAuthority(ctx context.Context, id types.CertAuthID, loadSigningKeys bool) (types.CertAuthority, error)
+
+	// CreateCertAuthorities creates multiple cert authorities atomically.
CreateCertAuthorities(context.Context, ...types.CertAuthority) (revision string, err error) diff --git a/lib/srv/app/watcher.go b/lib/srv/app/watcher.go index c88e73dd7f5ab..706a17b2ea60f 100644 --- a/lib/srv/app/watcher.go +++ b/lib/srv/app/watcher.go @@ -21,7 +21,6 @@ package app import ( "context" "fmt" - "log/slog" "github.com/gravitational/trace" @@ -42,8 +41,7 @@ func (s *Server) startReconciler(ctx context.Context) error { OnCreate: s.onCreate, OnUpdate: s.onUpdate, OnDelete: s.onDelete, - // TODO(tross): update to use the server logger once it is converted to slog - Logger: slog.With("kind", types.KindApp), + Logger: s.log.With("kind", types.KindApp), }) if err != nil { return trace.Wrap(err) @@ -77,9 +75,8 @@ func (s *Server) startResourceWatcher(ctx context.Context) (*services.GenericWat watcher, err := services.NewAppWatcher(ctx, services.AppWatcherConfig{ ResourceWatcherConfig: services.ResourceWatcherConfig{ Component: teleport.ComponentApp, - // TODO(tross): update this once converted to use slog - // Log: s.log, - Client: s.c.AccessPoint, + Logger: s.log, + Client: s.c.AccessPoint, }, AppGetter: s.c.AccessPoint, }) diff --git a/lib/srv/desktop/discovery.go b/lib/srv/desktop/discovery.go index 852468927e46c..93a46deeb2025 100644 --- a/lib/srv/desktop/discovery.go +++ b/lib/srv/desktop/discovery.go @@ -23,7 +23,6 @@ import ( "encoding/hex" "errors" "fmt" - "log/slog" "maps" "net" "net/netip" @@ -55,8 +54,7 @@ func (s *WindowsService) startDesktopDiscovery() error { OnCreate: s.upsertDesktop, OnUpdate: s.updateDesktop, OnDelete: s.deleteDesktop, - // TODO(tross): update to use the service logger once it is converted to use slog - Logger: slog.With("kind", types.KindWindowsDesktop), + Logger: s.cfg.Logger.With("kind", types.KindWindowsDesktop), }) if err != nil { return trace.Wrap(err) @@ -100,6 +98,16 @@ func (s *WindowsService) ldapSearchFilter() string { // getDesktopsFromLDAP discovers Windows hosts via LDAP func (s *WindowsService) getDesktopsFromLDAP() map[string]types.WindowsDesktop { + // Check whether we've ever successfully initialized our LDAP client. + s.mu.Lock() + if !s.ldapInitialized { + s.cfg.Logger.DebugContext(context.Background(), "LDAP not ready, skipping discovery and attempting to reconnect") + s.mu.Unlock() + s.initializeLDAP() + return nil + } + s.mu.Unlock() + filter := s.ldapSearchFilter() s.cfg.Logger.DebugContext(context.Background(), "searching for desktops", "filter", filter) diff --git a/lib/srv/desktop/discovery_test.go b/lib/srv/desktop/discovery_test.go index fc188f75ce1d6..01941e02d0056 100644 --- a/lib/srv/desktop/discovery_test.go +++ b/lib/srv/desktop/discovery_test.go @@ -279,7 +279,7 @@ func TestDynamicWindowsDiscovery(t *testing.T) { } desktop.Spec.Addr = "addr2" - _, err = dynamicWindowsClient.UpdateDynamicWindowsDesktop(ctx, desktop) + _, err = dynamicWindowsClient.UpsertDynamicWindowsDesktop(ctx, desktop) require.NoError(t, err) time.Sleep(10 * time.Millisecond) diff --git a/lib/srv/desktop/windows_server.go b/lib/srv/desktop/windows_server.go index 77b272acd0696..fd75cbc89bd04 100644 --- a/lib/srv/desktop/windows_server.go +++ b/lib/srv/desktop/windows_server.go @@ -450,7 +450,20 @@ func (s *WindowsService) startLDAPConnectionCheck(ctx context.Context) { for { select { case <-t.Chan(): - // attempt to read CAs in the NTAuth store (we know we have permissions to do so) + // First check if we have successfully initialized the LDAP client. + // If not, then do that now and return. 
+			// (This mimics the check that is performed when LDAP discovery is enabled.)
+			s.mu.Lock()
+			if !s.ldapInitialized {
+				s.cfg.Logger.DebugContext(context.Background(), "LDAP not ready, attempting to reconnect")
+				s.mu.Unlock()
+				s.initializeLDAP()
+				return
+			}
+			s.mu.Unlock()
+
+			// If we have initialized the LDAP client, then try to use it to make sure we're still connected
+			// by attempting to read CAs in the NTAuth store (we know we have permissions to do so).
 			ntAuthDN := "CN=NTAuthCertificates,CN=Public Key Services,CN=Services,CN=Configuration," + s.cfg.LDAPConfig.DomainDN()
 			_, err := s.lc.Read(ntAuthDN, "certificationAuthority", []string{"cACertificate"})
 			if trace.IsConnectionProblem(err) {
diff --git a/lib/usagereporter/teleport/aggregating/reporter.go b/lib/usagereporter/teleport/aggregating/reporter.go
index 66d723f6fba78..2813581976afd 100644
--- a/lib/usagereporter/teleport/aggregating/reporter.go
+++ b/lib/usagereporter/teleport/aggregating/reporter.go
@@ -28,7 +28,6 @@ import (
 	"github.com/google/uuid"
 	"github.com/gravitational/trace"
 	"github.com/jonboulle/clockwork"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport/api/types"
 	prehogv1 "github.com/gravitational/teleport/gen/proto/go/prehog/v1"
@@ -52,15 +51,11 @@ const (
 type ReporterConfig struct {
 	// Backend is the backend used to store reports. Required
 	Backend backend.Backend
-	// Log is the logger used for logging.
-	// TODO(tross): Delete once e has been converted
-	Log logrus.FieldLogger
 	// Logger is used for emitting log messages.
 	Logger *slog.Logger
 	// Clock is the clock used for timestamping reports and deciding when to
 	// persist them to the backend. Optional, defaults to the real clock.
 	Clock clockwork.Clock
-
 	// ClusterName is the ClusterName resource for the current cluster, used for
 	// anonymization and to report the cluster name itself. Required.
 	ClusterName types.ClusterName
diff --git a/lib/usagereporter/teleport/aggregating/submitter.go b/lib/usagereporter/teleport/aggregating/submitter.go
index 528966336260b..e36dca61e32ff 100644
--- a/lib/usagereporter/teleport/aggregating/submitter.go
+++ b/lib/usagereporter/teleport/aggregating/submitter.go
@@ -26,7 +26,6 @@ import (
 
 	"github.com/google/uuid"
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport/api/types"
 	"github.com/gravitational/teleport/api/utils/retryutils"
@@ -69,9 +68,6 @@ var alertMessage = fmt.Sprintf("Teleport has failed to contact the usage reporti
 type SubmitterConfig struct {
 	// Backend is the backend to use to read reports and apply locks. Required.
 	Backend backend.Backend
-	// Log is the [logrus.FieldLogger] used for logging.
-	// TODO(tross): remove once e has been converted
-	Log logrus.FieldLogger
 	// Logger is used for emitting log messages.
 	Logger *slog.Logger
 	// Status is used to create or clear cluster alerts on a failure. Required.
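A minimal sketch of what this migration asks of callers: with the logrus
fields gone, the slog-based Logger is populated directly. Backend and Logger
are real fields of the configs above; the backend value, component label, and
remaining wiring are placeholders.

	cfg := aggregating.SubmitterConfig{
		Backend: bk, // an initialized backend.Backend, as before
		Logger:  slog.Default().With("component", "usage-reporting"),
		// Status and the other required fields are unchanged.
	}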
diff --git a/lib/usagereporter/teleport/usagereporter.go b/lib/usagereporter/teleport/usagereporter.go index 928b37a0dbe8a..91b2845611b9c 100644 --- a/lib/usagereporter/teleport/usagereporter.go +++ b/lib/usagereporter/teleport/usagereporter.go @@ -122,16 +122,7 @@ func (t *StreamingUsageReporter) Run(ctx context.Context) { type SubmitFunc = usagereporter.SubmitFunc[prehogv1a.SubmitEventRequest] -// TODO(tross): change the log type once e has been updated -func NewStreamingUsageReporter(log any, clusterName types.ClusterName, anonymizationKey string, submitter SubmitFunc) (*StreamingUsageReporter, error) { - logger := slog.Default() - - if log != nil { - if l, ok := log.(*slog.Logger); ok { - logger = l - } - } - +func NewStreamingUsageReporter(logger *slog.Logger, clusterName types.ClusterName, anonymizationKey string, submitter SubmitFunc) (*StreamingUsageReporter, error) { if anonymizationKey == "" { return nil, trace.BadParameter("anonymization key is required") } diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index de4d085377c3c..2bbb2bcbdd561 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -65,6 +65,7 @@ import ( "github.com/gravitational/teleport/api/client/webclient" "github.com/gravitational/teleport/api/constants" apidefaults "github.com/gravitational/teleport/api/defaults" + autoupdatepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/autoupdate/v1" mfav1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/mfa/v1" notificationsv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/notifications/v1" "github.com/gravitational/teleport/api/mfa" @@ -135,6 +136,8 @@ const ( // This cache is here to protect against accidental or intentional DDoS, the TTL must be low to quickly reflect // cluster configuration changes. findEndpointCacheTTL = 10 * time.Second + // DefaultAgentUpdateJitterSeconds is the default jitter agents should wait before updating. 
+ DefaultAgentUpdateJitterSeconds = 60 ) // healthCheckAppServerFunc defines a function used to perform a health check @@ -1539,69 +1542,65 @@ func (h *Handler) ping(w http.ResponseWriter, r *http.Request, p httprouter.Para MinClientVersion: teleport.MinClientVersion, ClusterName: h.auth.clusterName, AutomaticUpgrades: pr.ServerFeatures.GetAutomaticUpgrades(), + AutoUpdate: h.automaticUpdateSettings(r.Context()), + Edition: modules.GetModules().BuildType(), + FIPS: modules.IsBoringBinary(), }, nil } func (h *Handler) find(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) { // cache the generic answer to avoid doing work for each request resp, err := utils.FnCacheGet[*webclient.PingResponse](r.Context(), h.findEndpointCache, "find", func(ctx context.Context) (*webclient.PingResponse, error) { - response := webclient.PingResponse{ - ServerVersion: teleport.Version, - MinClientVersion: teleport.MinClientVersion, - ClusterName: h.auth.clusterName, - } - - proxyConfig, err := h.cfg.ProxySettings.GetProxySettings(r.Context()) + proxyConfig, err := h.cfg.ProxySettings.GetProxySettings(ctx) if err != nil { return nil, trace.Wrap(err) } - response.Proxy = *proxyConfig - authPref, err := h.cfg.AccessPoint.GetAuthPreference(r.Context()) + authPref, err := h.cfg.AccessPoint.GetAuthPreference(ctx) if err != nil { return nil, trace.Wrap(err) } - response.Auth = webclient.AuthenticationSettings{SignatureAlgorithmSuite: authPref.GetSignatureAlgorithmSuite()} - - autoUpdateConfig, err := h.cfg.AccessPoint.GetAutoUpdateConfig(r.Context()) - // TODO(vapopov) DELETE IN v18.0.0 check of IsNotImplemented, must be backported to all latest supported versions. - if err != nil && !trace.IsNotFound(err) && !trace.IsNotImplemented(err) { - h.logger.ErrorContext(r.Context(), "failed to receive AutoUpdateConfig", "error", err) - } - // If we can't get the AU config or tools AU are not configured, we default to "disabled". - // This ensures we fail open and don't accidentally update agents if something is going wrong. - // If we want to enable AUs by default, it would be better to create a default "autoupdate_config" resource - // than changing this logic. - if autoUpdateConfig.GetSpec().GetTools() == nil { - response.AutoUpdate.ToolsMode = autoupdate.ToolsUpdateModeDisabled - } else { - response.AutoUpdate.ToolsMode = autoUpdateConfig.GetSpec().GetTools().GetMode() - } - - autoUpdateVersion, err := h.cfg.AccessPoint.GetAutoUpdateVersion(r.Context()) - // TODO(vapopov) DELETE IN v18.0.0 check of IsNotImplemented, must be backported to all latest supported versions. - if err != nil && !trace.IsNotFound(err) && !trace.IsNotImplemented(err) { - h.logger.ErrorContext(r.Context(), "failed to receive AutoUpdateVersion", "error", err) - } - // If we can't get the AU version or tools AU version is not specified, we default to the current proxy version. - // This ensures we always advertise a version compatible with the cluster. 
- if autoUpdateVersion.GetSpec().GetTools() == nil { - response.AutoUpdate.ToolsVersion = api.Version - } else { - response.AutoUpdate.ToolsVersion = autoUpdateVersion.GetSpec().GetTools().GetTargetVersion() - } - return &response, nil + return &webclient.PingResponse{ + Proxy: *proxyConfig, + Auth: webclient.AuthenticationSettings{SignatureAlgorithmSuite: authPref.GetSignatureAlgorithmSuite()}, + ServerVersion: teleport.Version, + MinClientVersion: teleport.MinClientVersion, + ClusterName: h.auth.clusterName, + Edition: modules.GetModules().BuildType(), + FIPS: modules.IsBoringBinary(), + AutoUpdate: h.automaticUpdateSettings(ctx), + }, nil }) if err != nil { return nil, trace.Wrap(err) } - - // If you need to modulate the response based on the request params (will need to do this for automatic updates) - // Do it here. return resp, nil } +// TODO: add the request as a parameter when we'll need to modulate the content based on the UUID and group +func (h *Handler) automaticUpdateSettings(ctx context.Context) webclient.AutoUpdateSettings { + autoUpdateConfig, err := h.cfg.AccessPoint.GetAutoUpdateConfig(ctx) + // TODO(vapopov) DELETE IN v18.0.0 check of IsNotImplemented, must be backported to all latest supported versions. + if err != nil && !trace.IsNotFound(err) && !trace.IsNotImplemented(err) { + h.logger.ErrorContext(ctx, "failed to receive AutoUpdateConfig", "error", err) + } + + autoUpdateVersion, err := h.cfg.AccessPoint.GetAutoUpdateVersion(ctx) + // TODO(vapopov) DELETE IN v18.0.0 check of IsNotImplemented, must be backported to all latest supported versions. + if err != nil && !trace.IsNotFound(err) && !trace.IsNotImplemented(err) { + h.logger.ErrorContext(ctx, "failed to receive AutoUpdateVersion", "error", err) + } + + return webclient.AutoUpdateSettings{ + ToolsMode: getToolsMode(autoUpdateConfig), + ToolsVersion: getToolsVersion(autoUpdateVersion), + AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds, + AgentVersion: getAgentVersion(autoUpdateVersion), + AgentAutoUpdate: agentShouldUpdate(autoUpdateConfig, autoUpdateVersion), + } +} + func (h *Handler) pingWithConnector(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) { authClient := h.cfg.ProxyClient connectorName := p.ByName("connector") @@ -5154,3 +5153,59 @@ func readEtagFromAppHash(fs http.FileSystem) (string, error) { return etag, nil } + +func getToolsMode(config *autoupdatepb.AutoUpdateConfig) string { + // If we can't get the AU config or if AUs are not configured, we default to "disabled". + // This ensures we fail open and don't accidentally update agents if something is going wrong. + // If we want to enable AUs by default, it would be better to create a default "autoupdate_config" resource + // than changing this logic. + if config.GetSpec().GetTools() == nil { + return autoupdate.ToolsUpdateModeDisabled + } + return config.GetSpec().GetTools().GetMode() +} + +func getToolsVersion(version *autoupdatepb.AutoUpdateVersion) string { + // If we can't get the AU version or tools AU version is not specified, we default to the current proxy version. + // This ensures we always advertise a version compatible with the cluster. + if version.GetSpec().GetTools() == nil { + return api.Version + } + return version.GetSpec().GetTools().GetTargetVersion() +} + +func getAgentVersion(version *autoupdatepb.AutoUpdateVersion) string { + // If we can't get the AU version or tools AU version is not specified, we default to the current proxy version. 
+ // This ensures we always advertise a version compatible with the cluster. + // TODO: read the version from the autoupdate_agent_rollout when the resource is implemented + if version.GetSpec().GetAgents() == nil { + return api.Version + } + + return version.GetSpec().GetAgents().GetTargetVersion() +} + +func agentShouldUpdate(config *autoupdatepb.AutoUpdateConfig, version *autoupdatepb.AutoUpdateVersion) bool { + // TODO: read the data from the autoupdate_agent_rollout when the resource is implemented + + // If we can't get the AU config or if AUs are not configured, we default to "disabled". + // This ensures we fail open and don't accidentally update agents if something is going wrong. + // If we want to enable AUs by default, it would be better to create a default "autoupdate_config" resource + // than changing this logic. + if config.GetSpec().GetAgents() == nil { + return false + } + if version.GetSpec().GetAgents() == nil { + return false + } + configMode := config.GetSpec().GetAgents().GetMode() + versionMode := version.GetSpec().GetAgents().GetMode() + + // We update only if both version and config agent modes are "enabled" + if configMode != autoupdate.AgentsUpdateModeEnabled || versionMode != autoupdate.AgentsUpdateModeEnabled { + return false + } + + scheduleName := version.GetSpec().GetAgents().GetSchedule() + return scheduleName == autoupdate.AgentsScheduleImmediate +} diff --git a/lib/web/apiserver_ping_test.go b/lib/web/apiserver_ping_test.go index 231c8625ffacd..5ce3720375c46 100644 --- a/lib/web/apiserver_ping_test.go +++ b/lib/web/apiserver_ping_test.go @@ -305,48 +305,110 @@ func TestPing_autoUpdateResources(t *testing.T) { { name: "resources not defined", expected: webclient.AutoUpdateSettings{ - ToolsVersion: api.Version, - ToolsMode: autoupdate.ToolsUpdateModeDisabled, + ToolsVersion: api.Version, + ToolsMode: autoupdate.ToolsUpdateModeDisabled, + AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds, + AgentAutoUpdate: false, + AgentVersion: api.Version, }, }, { - name: "enable auto update", + name: "enable tools auto update", config: &autoupdatev1pb.AutoUpdateConfigSpec{ Tools: &autoupdatev1pb.AutoUpdateConfigSpecTools{ Mode: autoupdate.ToolsUpdateModeEnabled, }, }, expected: webclient.AutoUpdateSettings{ - ToolsMode: autoupdate.ToolsUpdateModeEnabled, - ToolsVersion: api.Version, + ToolsMode: autoupdate.ToolsUpdateModeEnabled, + ToolsVersion: api.Version, + AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds, + AgentAutoUpdate: false, + AgentVersion: api.Version, }, cleanup: true, }, { - name: "no autoupdate tool config nor version", + name: "enable agent auto update, immediate schedule", + config: &autoupdatev1pb.AutoUpdateConfigSpec{ + Agents: &autoupdatev1pb.AutoUpdateConfigSpecAgents{ + Mode: autoupdate.AgentsUpdateModeEnabled, + Strategy: autoupdate.AgentsStrategyHaltOnError, + }, + }, + version: &autoupdatev1pb.AutoUpdateVersionSpec{ + Agents: &autoupdatev1pb.AutoUpdateVersionSpecAgents{ + Mode: autoupdate.AgentsUpdateModeEnabled, + StartVersion: "1.2.3", + TargetVersion: "1.2.4", + Schedule: autoupdate.AgentsScheduleImmediate, + }, + }, + expected: webclient.AutoUpdateSettings{ + ToolsVersion: api.Version, + ToolsMode: autoupdate.ToolsUpdateModeDisabled, + AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds, + AgentAutoUpdate: true, + AgentVersion: "1.2.4", + }, + cleanup: true, + }, + { + name: "version enable agent auto update, but config disables them", + config: &autoupdatev1pb.AutoUpdateConfigSpec{ + Agents: 
&autoupdatev1pb.AutoUpdateConfigSpecAgents{ + Mode: autoupdate.AgentsUpdateModeDisabled, + Strategy: autoupdate.AgentsStrategyHaltOnError, + }, + }, + version: &autoupdatev1pb.AutoUpdateVersionSpec{ + Agents: &autoupdatev1pb.AutoUpdateVersionSpecAgents{ + Mode: autoupdate.AgentsUpdateModeEnabled, + StartVersion: "1.2.3", + TargetVersion: "1.2.4", + Schedule: autoupdate.AgentsScheduleImmediate, + }, + }, + expected: webclient.AutoUpdateSettings{ + ToolsVersion: api.Version, + ToolsMode: autoupdate.ToolsUpdateModeDisabled, + AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds, + AgentAutoUpdate: false, + AgentVersion: "1.2.4", + }, + cleanup: true, + }, + { + name: "empty config and version", config: &autoupdatev1pb.AutoUpdateConfigSpec{}, version: &autoupdatev1pb.AutoUpdateVersionSpec{}, expected: webclient.AutoUpdateSettings{ - ToolsVersion: api.Version, - ToolsMode: autoupdate.ToolsUpdateModeDisabled, + ToolsVersion: api.Version, + ToolsMode: autoupdate.ToolsUpdateModeDisabled, + AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds, + AgentAutoUpdate: false, + AgentVersion: api.Version, }, cleanup: true, }, { - name: "set auto update version", + name: "set tools auto update version", version: &autoupdatev1pb.AutoUpdateVersionSpec{ Tools: &autoupdatev1pb.AutoUpdateVersionSpecTools{ TargetVersion: "1.2.3", }, }, expected: webclient.AutoUpdateSettings{ - ToolsVersion: "1.2.3", - ToolsMode: autoupdate.ToolsUpdateModeDisabled, + ToolsVersion: "1.2.3", + ToolsMode: autoupdate.ToolsUpdateModeDisabled, + AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds, + AgentAutoUpdate: false, + AgentVersion: api.Version, }, cleanup: true, }, { - name: "enable auto update and set version", + name: "enable tools auto update and set version", config: &autoupdatev1pb.AutoUpdateConfigSpec{ Tools: &autoupdatev1pb.AutoUpdateConfigSpecTools{ Mode: autoupdate.ToolsUpdateModeEnabled, @@ -358,8 +420,11 @@ func TestPing_autoUpdateResources(t *testing.T) { }, }, expected: webclient.AutoUpdateSettings{ - ToolsMode: autoupdate.ToolsUpdateModeEnabled, - ToolsVersion: "1.2.3", + ToolsMode: autoupdate.ToolsUpdateModeEnabled, + ToolsVersion: "1.2.3", + AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds, + AgentAutoUpdate: false, + AgentVersion: api.Version, }, }, { @@ -375,8 +440,11 @@ func TestPing_autoUpdateResources(t *testing.T) { }, }, expected: webclient.AutoUpdateSettings{ - ToolsMode: autoupdate.ToolsUpdateModeDisabled, - ToolsVersion: "3.2.1", + ToolsMode: autoupdate.ToolsUpdateModeDisabled, + ToolsVersion: "3.2.1", + AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds, + AgentAutoUpdate: false, + AgentVersion: api.Version, }, }, } diff --git a/lib/web/apps.go b/lib/web/apps.go index 9dfefd4a3eb10..5e809d2df29e1 100644 --- a/lib/web/apps.go +++ b/lib/web/apps.go @@ -60,23 +60,7 @@ func (h *Handler) clusterAppsGet(w http.ResponseWriter, r *http.Request, p httpr page, err := apiclient.GetResourcePage[types.AppServerOrSAMLIdPServiceProvider](r.Context(), clt, req) if err != nil { - // If the error returned is due to types.KindAppOrSAMLIdPServiceProvider being unsupported, then fallback to attempting to just fetch types.AppServers. - // This is for backwards compatibility with leaf clusters that don't support this new type yet. 
- // DELETE IN 15.0 - if trace.IsNotImplemented(err) { - req, err = convertListResourcesRequest(r, types.KindAppServer) - if err != nil { - return nil, trace.Wrap(err) - } - appServerPage, err := apiclient.GetResourcePage[types.AppServer](r.Context(), clt, req) - if err != nil { - return nil, trace.Wrap(err) - } - // Convert the ResourcePage returned containing AppServers to a ResourcePage containing AppServerOrSAMLIdPServiceProviders. - page = appServerOrSPPageFromAppServerPage(appServerPage) - } else { - return nil, trace.Wrap(err) - } + return nil, trace.Wrap(err) } userGroups, err := apiclient.GetAllResources[types.UserGroup](r.Context(), clt, &proto.ListResourcesRequest{ @@ -449,28 +433,3 @@ func (h *Handler) proxyDNSNames() (dnsNames []string) { } return dnsNames } - -// appServerOrSPPageFromAppServerPage converts a ResourcePage containing AppServers to a ResourcePage containing AppServerOrSAMLIdPServiceProviders. -// DELETE IN 15.0 -// -//nolint:staticcheck // SA1019. To be deleted along with the API in 16.0. -func appServerOrSPPageFromAppServerPage(appServerPage apiclient.ResourcePage[types.AppServer]) apiclient.ResourcePage[types.AppServerOrSAMLIdPServiceProvider] { - resources := make([]types.AppServerOrSAMLIdPServiceProvider, len(appServerPage.Resources)) - - for i, appServer := range appServerPage.Resources { - // Create AppServerOrSAMLIdPServiceProvider object from appServer. - appServerOrSP := &types.AppServerOrSAMLIdPServiceProviderV1{ - Resource: &types.AppServerOrSAMLIdPServiceProviderV1_AppServer{ - AppServer: appServer.(*types.AppServerV3), - }, - } - - resources[i] = appServerOrSP - } - - return apiclient.ResourcePage[types.AppServerOrSAMLIdPServiceProvider]{ - Resources: resources, - Total: appServerPage.Total, - NextKey: appServerPage.NextKey, - } -} diff --git a/tool/tctl/common/plugin/entraid.go b/tool/tctl/common/plugin/entraid.go new file mode 100644 index 0000000000000..ea5010504ca9f --- /dev/null +++ b/tool/tctl/common/plugin/entraid.go @@ -0,0 +1,419 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +package plugin + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/alecthomas/kingpin/v2" + "github.com/fatih/color" + "github.com/google/safetext/shsprintf" + "github.com/google/uuid" + "github.com/gravitational/trace" + + pluginspb "github.com/gravitational/teleport/api/gen/proto/go/teleport/plugins/v1" + "github.com/gravitational/teleport/api/types" + entraapiutils "github.com/gravitational/teleport/api/utils/entraid" + "github.com/gravitational/teleport/lib/integrations/azureoidc" + "github.com/gravitational/teleport/lib/utils/oidc" + "github.com/gravitational/teleport/lib/web/scripts/oneoff" +) + +var ( + bold = color.New(color.Bold).SprintFunc() + boldRed = color.New(color.Bold, color.FgRed).SprintFunc() + + step1Template = bold("Step 1: Run the Setup Script") + ` + +1. Open ` + bold("Azure Cloud Shell") + ` (Bash) [https://portal.azure.com/#cloudshell/] using ` + bold("Google Chrome") + ` or ` + bold("Safari") + ` for the best compatibility. +2. Upload the setup script in ` + boldRed("%s") + ` using the ` + bold("Upload") + ` button in the Cloud Shell toolbar. +3. Once uploaded, execute the script by running the following command: + $ bash %s + +` + bold("Important Considerations") + `: +- You must have ` + bold("Azure privileged administrator permissions") + ` to complete the integration. +- Ensure you're using the ` + bold("Bash") + ` environment in Cloud Shell. +- During the script execution, you'll be prompted to run 'az login' to authenticate with Azure. ` + bold("Teleport") + ` does not store or persist your credentials. +- ` + bold("Mozilla Firefox") + ` users may experience connectivity issues in Azure Cloud Shell; using Chrome or Safari is recommended. + +To rerun the script, type 'exit' to close and then restart the process. + +` + + step2Template = ` + +` + bold("Step 2: Input Tenant ID and Client ID") + ` + +With the output of Step 1, please copy and paste the following information: +` +) + +type entraArgs struct { + cmd *kingpin.CmdClause + authConnectorName string + defaultOwners []string + useSystemCredentials bool + accessGraph bool + force bool +} + +func (p *PluginsCommand) initInstallEntra(parent *kingpin.CmdClause) { + p.install.entraID.cmd = parent.Command("entraid", "Install an EntraId integration.") + cmd := p.install.entraID.cmd + cmd. + Flag("name", "Name of the plugin resource to create"). + Default("entra-id"). + StringVar(&p.install.name) + + cmd. + Flag("auth-connector-name", "Name of the SAML connector resource to create"). + Default("entra-id-default"). + StringVar(&p.install.entraID.authConnectorName) + + cmd. + Flag("use-system-credentials", "Uses system credentials instead of OIDC."). + BoolVar(&p.install.entraID.useSystemCredentials) + + cmd.Flag("default-owner", "List of Teleport users that are default owners for the imported access lists. Multiple flags allowed."). + Required(). + StringsVar(&p.install.entraID.defaultOwners) + + cmd. + Flag("access-graph", "Enables Access Graph cache build."). + Default("true"). + BoolVar(&p.install.entraID.accessGraph) + + cmd. + Flag("force", "Proceed with installation even if plugin already exists."). + Short('f'). + Default("false"). 
+ BoolVar(&p.install.entraID.force) +} + +type entraSettings struct { + accessGraphCache *azureoidc.TAGInfoCache + clientID string + tenantID string +} + +var ( + errCancel = trace.BadParameter("operation canceled") +) + +func (p *PluginsCommand) entraSetupGuide(proxyPublicAddr string) (entraSettings, error) { + pwd, err := os.Getwd() + if err != nil { + return entraSettings{}, trace.Wrap(err, "failed to get working dir") + } + f, err := os.CreateTemp(pwd, "entraid-setup-*.sh") + if err != nil { + return entraSettings{}, trace.Wrap(err, "failed to create temp file") + } + + defer os.Remove(f.Name()) + + buildScript, err := buildScript(proxyPublicAddr, p.install.entraID) + if err != nil { + return entraSettings{}, trace.Wrap(err, "failed to build script") + } + + if _, err := f.Write([]byte(buildScript)); err != nil { + return entraSettings{}, trace.Wrap(err, "failed to write script to file") + } + + if err := f.Close(); err != nil { + return entraSettings{}, trace.Wrap(err, "failed to close file") + } + fileLoc := f.Name() + + fmt.Fprintf(os.Stdout, step1Template, fileLoc, filepath.Base(fileLoc)) + + op, err := readData(os.Stdin, os.Stdout, + `Once the script completes, type 'continue' to proceed, 'exit' to quit`, + func(input string) bool { + return input == "continue" || input == "exit" + }, "Invalid input. Please enter 'continue' or 'exit'.") + if err != nil { + return entraSettings{}, trace.Wrap(err, "failed to read operation") + } + if op == "exit" { // User chose to exit + return entraSettings{}, errCancel + } + + validUUID := func(input string) bool { + _, err := uuid.Parse(input) + return err == nil + } + + fmt.Fprint(os.Stdout, step2Template) + + var settings entraSettings + settings.tenantID, err = readData(os.Stdin, os.Stdout, "Enter the Tenant ID", validUUID, "Invalid Tenant ID") + if err != nil { + return settings, trace.Wrap(err, "failed to read Tenant ID") + } + + settings.clientID, err = readData(os.Stdin, os.Stdout, "Enter the Client ID", validUUID, "Invalid Client ID") + if err != nil { + return settings, trace.Wrap(err, "failed to read Client ID") + } + + if p.install.entraID.accessGraph { + dataValidator := func(input string) bool { + settings.accessGraphCache, err = readTAGCache(input) + return err == nil + } + _, err = readData(os.Stdin, os.Stdout, "Enter the Access Graph Cache file location", dataValidator, "File does not exist or is invalid") + if err != nil { + return settings, trace.Wrap(err, "failed to read Access Graph Cache file") + } + } + return settings, nil +} + +// InstallEntra is the entry point for the `tctl plugins install entraid` command. +// This function guides users through an interactive setup process to configure EntraID integration, +// directing them to execute a script in Azure Cloud Shell and provide the required configuration inputs. +// The script creates an Azure EntraID Enterprise Application, enabling SAML logins in Teleport with +// the following claims: +// - givenname: user.givenname +// - surname: user.surname +// - emailaddress: user.mail +// - name: user.userprincipalname +// - groups: user.groups +// Additionally, the script establishes a Trust Policy in the application to allow Teleport +// to be recognized as a credential issuer when system credentials are not used. +// If system credentials are present, the script will skip the Trust policy creation using +// system credentials for EntraID authentication. 
+// Finally, if no system credentials are in use, the script will set up an Azure OIDC integration +// in Teleport and a Teleport plugin to synchronize access lists from EntraID to Teleport. +func (p *PluginsCommand) InstallEntra(ctx context.Context, args installPluginArgs) error { + inputs := p.install + + proxyPublicAddr, err := getProxyPublicAddr(ctx, args.authClient) + if err != nil { + return trace.Wrap(err) + } + + settings, err := p.entraSetupGuide(proxyPublicAddr) + if err != nil { + if errors.Is(err, errCancel) { + return nil + } + return trace.Wrap(err) + } + + var tagSyncSettings *types.PluginEntraIDAccessGraphSettings + if settings.accessGraphCache != nil { + tagSyncSettings = &types.PluginEntraIDAccessGraphSettings{ + AppSsoSettingsCache: settings.accessGraphCache.AppSsoSettingsCache, + } + } + + saml, err := types.NewSAMLConnector(inputs.entraID.authConnectorName, types.SAMLConnectorSpecV2{ + AssertionConsumerService: strings.TrimRight(proxyPublicAddr, "/") + "/v1/webapi/saml/acs/" + inputs.entraID.authConnectorName, + AllowIDPInitiated: true, + // AttributesToRoles is required, but Entra ID does not have a default group (like Okta's "Everyone"), + // so we add a dummy claim that will always be fulfilled and map them to the "requester" role. + AttributesToRoles: []types.AttributeMapping{ + { + Name: "http://schemas.microsoft.com/ws/2008/06/identity/claims/groups", + Value: "*", + Roles: []string{"requester"}, + }, + }, + Display: "Entra ID", + EntityDescriptorURL: entraapiutils.FederationMetadataURL(settings.tenantID, settings.clientID), + }) + if err != nil { + return trace.Wrap(err, "failed to create SAML connector") + } + + if _, err = args.authClient.CreateSAMLConnector(ctx, saml); err != nil { + if !trace.IsAlreadyExists(err) || !inputs.entraID.force { + return trace.Wrap(err, "failed to create SAML connector") + } + if _, err = args.authClient.UpsertSAMLConnector(ctx, saml); err != nil { + return trace.Wrap(err, "failed to upsert SAML connector") + } + } + + if !inputs.entraID.useSystemCredentials { + integrationSpec, err := types.NewIntegrationAzureOIDC( + types.Metadata{Name: inputs.name}, + &types.AzureOIDCIntegrationSpecV1{ + TenantID: settings.tenantID, + ClientID: settings.clientID, + }, + ) + if err != nil { + return trace.Wrap(err, "failed to create Azure OIDC integration") + } + + if _, err = args.authClient.CreateIntegration(ctx, integrationSpec); err != nil { + if !trace.IsAlreadyExists(err) || !inputs.entraID.force { + return trace.Wrap(err, "failed to create Azure OIDC integration") + } + + integration, err := args.authClient.GetIntegration(ctx, integrationSpec.GetName()) + if err != nil { + return trace.Wrap(err, "failed to get Azure OIDC integration") + } + integration.SetAWSOIDCIntegrationSpec(integrationSpec.GetAWSOIDCIntegrationSpec()) + if _, err = args.authClient.UpdateIntegration(ctx, integration); err != nil { + return trace.Wrap(err, "failed to create Azure OIDC integration") + } + } + } + + credentialsSource := types.EntraIDCredentialsSource_ENTRAID_CREDENTIALS_SOURCE_OIDC + if inputs.entraID.useSystemCredentials { + credentialsSource = types.EntraIDCredentialsSource_ENTRAID_CREDENTIALS_SOURCE_SYSTEM_CREDENTIALS + } + req := &pluginspb.CreatePluginRequest{ + Plugin: &types.PluginV1{ + Metadata: types.Metadata{ + Name: inputs.name, + Labels: map[string]string{ + "teleport.dev/hosted-plugin": "true", + }, + }, + Spec: types.PluginSpecV1{ + Settings: &types.PluginSpecV1_EntraId{ + EntraId: &types.PluginEntraIDSettings{ + SyncSettings: 
+							DefaultOwners:     inputs.entraID.defaultOwners,
+							SsoConnectorId:    inputs.entraID.authConnectorName,
+							CredentialsSource: credentialsSource,
+							TenantId:          settings.tenantID,
+						},
+						AccessGraphSettings: tagSyncSettings,
+					},
+				},
+			},
+		},
+	}
+
+	_, err = args.plugins.CreatePlugin(ctx, req)
+	if err != nil {
+		if !trace.IsAlreadyExists(err) || !inputs.entraID.force {
+			return trace.Wrap(err)
+		}
+		plugin := req.GetPlugin()
+		{
+			oldPlugin, err := args.plugins.GetPlugin(ctx, &pluginspb.GetPluginRequest{
+				Name: inputs.name,
+			})
+			if err != nil {
+				return trace.Wrap(err)
+			}
+			// Preserve the stored revision so the update does not clobber concurrent changes.
+			plugin.Metadata.Revision = oldPlugin.GetMetadata().Revision
+		}
+		if _, err = args.plugins.UpdatePlugin(ctx, &pluginspb.UpdatePluginRequest{
+			Plugin: plugin,
+		}); err != nil {
+			return trace.Wrap(err)
+		}
+	}
+
+	fmt.Printf("Successfully created EntraID plugin %q\n\n", p.install.name)
+
+	return nil
+}
+
+func buildScript(proxyPublicAddr string, entraCfg entraArgs) (string, error) {
+	// The script must execute the following command:
+	argsList := []string{
+		"integration", "configure", "azure-oidc",
+		fmt.Sprintf("--proxy-public-addr=%s", shsprintf.EscapeDefaultContext(proxyPublicAddr)),
+		fmt.Sprintf("--auth-connector-name=%s", shsprintf.EscapeDefaultContext(entraCfg.authConnectorName)),
+	}
+
+	if entraCfg.accessGraph {
+		argsList = append(argsList, "--access-graph")
+	}
+
+	if entraCfg.useSystemCredentials {
+		argsList = append(argsList, "--skip-oidc-integration")
+	}
+
+	script, err := oneoff.BuildScript(oneoff.OneOffScriptParams{
+		TeleportArgs:   strings.Join(argsList, " "),
+		SuccessMessage: "Success! You can now go back to the Teleport Web UI to use the integration with Azure.",
+	})
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+	return script, nil
+}
+
+// getProxyPublicAddr fetches the proxy public address from the cluster and
+// normalizes it into an OIDC issuer URL.
+func getProxyPublicAddr(ctx context.Context, authClient authClient) (string, error) {
+	pingResp, err := authClient.Ping(ctx)
+	if err != nil {
+		return "", trace.Wrap(err, "failed fetching cluster info")
+	}
+	proxyPublicAddr := pingResp.GetProxyPublicAddr()
+	oidcIssuer, err := oidc.IssuerFromPublicAddress(proxyPublicAddr, "")
+	return oidcIssuer, trace.Wrap(err)
+}
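For reference, when accessGraph is enabled and useSystemCredentials is not set, the argsList assembled by buildScript above results in the one-off invocation `teleport integration configure azure-oidc --proxy-public-addr=<issuer> --auth-connector-name=<connector> --access-graph`; when useSystemCredentials is set, `--skip-oidc-integration` is appended instead, matching the flag registered in tool/teleport/common/teleport.go further down in this diff.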
+
+// readTAGCache reads the TAG cache file and returns the TAGInfoCache object.
+// azureoidc.TAGInfoCache is a struct that contains the information necessary for Access Graph to analyze Azure SSO.
+// It contains a list of AppIDs and their corresponding FederatedSsoV2 information.
+func readTAGCache(fileLoc string) (*azureoidc.TAGInfoCache, error) {
+	if fileLoc == "" {
+		return nil, trace.BadParameter("no TAG cache file specified")
+	}
+
+	file, err := os.Open(fileLoc)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+	defer file.Close()
+
+	var result azureoidc.TAGInfoCache
+	if err := json.NewDecoder(file).Decode(&result); err != nil {
+		return nil, trace.Wrap(err)
+	}
+
+	return &result, nil
+}
+
+// readData prompts with message, reads a line from r, and re-prompts until
+// validate accepts the input or r is exhausted.
+func readData(r io.Reader, w io.Writer, message string, validate func(string) bool, errorMessage string) (string, error) {
+	reader := bufio.NewReader(r)
+	for {
+		fmt.Fprintf(w, "%s: ", message)
+		input, err := reader.ReadString('\n')
+		if err != nil && !errors.Is(err, io.EOF) {
+			return "", trace.Wrap(err, "failed to read input")
+		}
+		input = strings.TrimSpace(input) // Clean up any extra newlines or spaces
+
+		if !validate(input) {
+			if errors.Is(err, io.EOF) {
+				// Avoid looping forever once the input stream is closed.
+				return "", trace.BadParameter("%s", errorMessage)
+			}
+			fmt.Fprintf(w, "%s\n", errorMessage)
+			continue
+		}
+		return input, nil
+	}
+}
diff --git a/tool/tctl/common/plugin/plugins_command.go b/tool/tctl/common/plugin/plugins_command.go
index ba6c92f7ae5a9..df8b9eeb4ed3b 100644
--- a/tool/tctl/common/plugin/plugins_command.go
+++ b/tool/tctl/common/plugin/plugins_command.go
@@ -49,10 +49,11 @@ func logErrorMessage(err error) slog.Attr {
 }
 
 type pluginInstallArgs struct {
-	cmd  *kingpin.CmdClause
-	name string
-	okta oktaArgs
-	scim scimArgs
+	cmd     *kingpin.CmdClause
+	name    string
+	okta    oktaArgs
+	scim    scimArgs
+	entraID entraArgs
 }
 
 type scimArgs struct {
@@ -98,6 +99,7 @@ func (p *PluginsCommand) initInstall(parent *kingpin.CmdClause, config *servicec
 
 	p.initInstallOkta(p.install.cmd)
 	p.initInstallSCIM(p.install.cmd)
+	p.initInstallEntra(p.install.cmd)
 }
 
 func (p *PluginsCommand) initInstallSCIM(parent *kingpin.CmdClause) {
@@ -200,11 +202,18 @@ func (p *PluginsCommand) Cleanup(ctx context.Context, clusterAPI *authclient.Cli
 
 type authClient interface {
 	GetSAMLConnector(ctx context.Context, id string, withSecrets bool) (types.SAMLConnector, error)
+	CreateSAMLConnector(ctx context.Context, connector types.SAMLConnector) (types.SAMLConnector, error)
+	UpsertSAMLConnector(ctx context.Context, connector types.SAMLConnector) (types.SAMLConnector, error)
+	CreateIntegration(ctx context.Context, ig types.Integration) (types.Integration, error)
+	GetIntegration(ctx context.Context, name string) (types.Integration, error)
+	UpdateIntegration(ctx context.Context, ig types.Integration) (types.Integration, error)
 	Ping(ctx context.Context) (proto.PingResponse, error)
 }
 
 type pluginsClient interface {
 	CreatePlugin(ctx context.Context, in *pluginsv1.CreatePluginRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	GetPlugin(ctx context.Context, in *pluginsv1.GetPluginRequest, opts ...grpc.CallOption) (*types.PluginV1, error)
+	UpdatePlugin(ctx context.Context, in *pluginsv1.UpdatePluginRequest, opts ...grpc.CallOption) (*types.PluginV1, error)
 }
 
 type installPluginArgs struct {
@@ -310,6 +319,9 @@ func (p *PluginsCommand) TryRun(ctx context.Context, cmd string, client *authcli
 		err = p.InstallOkta(ctx, args)
 	case p.install.scim.cmd.FullCommand():
 		err = p.InstallSCIM(ctx, client)
+	case p.install.entraID.cmd.FullCommand():
+		args := installPluginArgs{authClient: client, plugins: client.PluginsClient()}
+		err = p.InstallEntra(ctx, args)
 	case p.delete.cmd.FullCommand():
 		err = p.Delete(ctx, client)
 	default:
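Before the diff moves on to the test file, a hedged sketch of a package-level test for readTAGCache; the test name and assertions are illustrative, assuming `os`, `path/filepath`, and `testing` are imported in the test file:

```go
func TestReadTAGCacheDecodesJSON(t *testing.T) {
	loc := filepath.Join(t.TempDir(), "tag-cache.json")
	// An empty JSON object decodes into a zero-valued TAGInfoCache.
	if err := os.WriteFile(loc, []byte(`{}`), 0o600); err != nil {
		t.Fatal(err)
	}
	cache, err := readTAGCache(loc)
	if err != nil {
		t.Fatalf("readTAGCache: %v", err)
	}
	if cache == nil {
		t.Fatal("expected a non-nil TAGInfoCache")
	}

	// The empty-path guard returns a BadParameter error.
	if _, err := readTAGCache(""); err == nil {
		t.Fatal("expected an error for an empty file location")
	}
}
```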
diff --git a/tool/tctl/common/plugin/plugins_command_test.go b/tool/tctl/common/plugin/plugins_command_test.go
index e42f21e26310f..9033311f3272c 100644
--- a/tool/tctl/common/plugin/plugins_command_test.go
+++ b/tool/tctl/common/plugin/plugins_command_test.go
@@ -449,6 +449,16 @@ func (m *mockPluginsClient) CreatePlugin(ctx context.Context, in *pluginsv1.Crea
 	return result.Get(0).(*emptypb.Empty), result.Error(1)
 }
 
+func (m *mockPluginsClient) GetPlugin(ctx context.Context, in *pluginsv1.GetPluginRequest, opts ...grpc.CallOption) (*types.PluginV1, error) {
+	result := m.Called(ctx, in, opts)
+	return result.Get(0).(*types.PluginV1), result.Error(1)
+}
+
+func (m *mockPluginsClient) UpdatePlugin(ctx context.Context, in *pluginsv1.UpdatePluginRequest, opts ...grpc.CallOption) (*types.PluginV1, error) {
+	result := m.Called(ctx, in, opts)
+	return result.Get(0).(*types.PluginV1), result.Error(1)
+}
+
 type mockAuthClient struct {
 	mock.Mock
 }
@@ -457,6 +467,27 @@ func (m *mockAuthClient) GetSAMLConnector(ctx context.Context, id string, withSe
 	result := m.Called(ctx, id, withSecrets)
 	return result.Get(0).(types.SAMLConnector), result.Error(1)
 }
+func (m *mockAuthClient) CreateSAMLConnector(ctx context.Context, connector types.SAMLConnector) (types.SAMLConnector, error) {
+	result := m.Called(ctx, connector)
+	return result.Get(0).(types.SAMLConnector), result.Error(1)
+}
+func (m *mockAuthClient) UpsertSAMLConnector(ctx context.Context, connector types.SAMLConnector) (types.SAMLConnector, error) {
+	result := m.Called(ctx, connector)
+	return result.Get(0).(types.SAMLConnector), result.Error(1)
+}
+func (m *mockAuthClient) CreateIntegration(ctx context.Context, ig types.Integration) (types.Integration, error) {
+	result := m.Called(ctx, ig)
+	return result.Get(0).(types.Integration), result.Error(1)
+}
+func (m *mockAuthClient) UpdateIntegration(ctx context.Context, ig types.Integration) (types.Integration, error) {
+	result := m.Called(ctx, ig)
+	return result.Get(0).(types.Integration), result.Error(1)
+}
+
+func (m *mockAuthClient) GetIntegration(ctx context.Context, name string) (types.Integration, error) {
+	result := m.Called(ctx, name)
+	return result.Get(0).(types.Integration), result.Error(1)
+}
 
 func (m *mockAuthClient) Ping(ctx context.Context) (proto.PingResponse, error) {
 	result := m.Called(ctx)
diff --git a/tool/tctl/common/resource_command.go b/tool/tctl/common/resource_command.go
index c37e8805581e6..32a07121b63ca 100644
--- a/tool/tctl/common/resource_command.go
+++ b/tool/tctl/common/resource_command.go
@@ -904,7 +904,7 @@ func (rc *ResourceCommand) createDynamicWindowsDesktop(ctx context.Context, clie
 		if !rc.force {
 			return trace.AlreadyExists("application %q already exists", wd.GetName())
 		}
-		if _, err := dynamicDesktopClient.UpdateDynamicWindowsDesktop(ctx, wd); err != nil {
+		if _, err := dynamicDesktopClient.UpsertDynamicWindowsDesktop(ctx, wd); err != nil {
 			return trace.Wrap(err)
 		}
 		fmt.Printf("dynamic windows desktop %q has been updated\n", wd.GetName())
diff --git a/tool/tctl/common/tctl.go b/tool/tctl/common/tctl.go
index 48ad1f0b75b6d..448a459df1653 100644
--- a/tool/tctl/common/tctl.go
+++ b/tool/tctl/common/tctl.go
@@ -47,6 +47,7 @@ import (
 	"github.com/gravitational/teleport/lib/client"
 	"github.com/gravitational/teleport/lib/client/identityfile"
 	libmfa "github.com/gravitational/teleport/lib/client/mfa"
+	"github.com/gravitational/teleport/lib/client/sso"
 	"github.com/gravitational/teleport/lib/config"
 	"github.com/gravitational/teleport/lib/defaults"
 	"github.com/gravitational/teleport/lib/modules"
@@ -257,6 +258,18 @@ func TryRun(commands []CLICommand, args []string) error {
 			PromptConfig: *promptCfg,
 		})
 	})
+	client.SetSSOMFACeremonyConstructor(func(ctx context.Context) (mfa.SSOMFACeremony, error) {
+		rdConfig := sso.RedirectorConfig{
+			ProxyAddr: proxyAddr,
+		}
+
+		rd, err := sso.NewRedirector(rdConfig)
+		if err != nil {
+			return nil, trace.Wrap(err)
+		}
+
+		return sso.NewCLIMFACeremony(rd), nil
+	})
 
 	// execute whatever is selected:
 	var match bool
diff --git a/tool/teleport/common/integration_configure.go b/tool/teleport/common/integration_configure.go
index bfd762d1322ec..97f531910e45e 100644
--- a/tool/teleport/common/integration_configure.go
+++ b/tool/teleport/common/integration_configure.go
@@ -251,7 +251,7 @@ func onIntegrationConfAzureOIDCCmd(ctx context.Context, params config.Integratio
 
 	fmt.Println("Teleport is setting up the Azure integration. This may take a few minutes.")
 
-	appID, tenantID, err := azureoidc.SetupEnterpriseApp(ctx, params.ProxyPublicAddr, params.AuthConnectorName)
+	appID, tenantID, err := azureoidc.SetupEnterpriseApp(ctx, params.ProxyPublicAddr, params.AuthConnectorName, params.SkipOIDCConfiguration)
 	if err != nil {
 		return trace.Wrap(err)
 	}
diff --git a/tool/teleport/common/teleport.go b/tool/teleport/common/teleport.go
index 3ccaa6ad1928a..9cd4436c68680 100644
--- a/tool/teleport/common/teleport.go
+++ b/tool/teleport/common/teleport.go
@@ -552,6 +552,7 @@ func Run(options Options) (app *kingpin.Application, executedCommand string, con
 	integrationConfAzureOIDCCmd.Flag("proxy-public-addr", "The public address of Teleport Proxy Service").Required().StringVar(&ccf.IntegrationConfAzureOIDCArguments.ProxyPublicAddr)
 	integrationConfAzureOIDCCmd.Flag("auth-connector-name", "The name of Entra ID SAML Auth connector in Teleport.").Required().StringVar(&ccf.IntegrationConfAzureOIDCArguments.AuthConnectorName)
 	integrationConfAzureOIDCCmd.Flag("access-graph", "Enable Access Graph integration.").BoolVar(&ccf.IntegrationConfAzureOIDCArguments.AccessGraphEnabled)
+	integrationConfAzureOIDCCmd.Flag("skip-oidc-integration", "Skip OIDC integration.").BoolVar(&ccf.IntegrationConfAzureOIDCArguments.SkipOIDCConfiguration)
 
 	integrationConfSAMLIdP := integrationConfigureCmd.Command("samlidp", "Manage SAML IdP integrations.")
 	integrationSAMLIdPGCPWorkforce := integrationConfSAMLIdP.Command("gcp-workforce", "Configures GCP Workforce Identity Federation pool and SAML provider.")
diff --git a/tool/tsh/common/db.go b/tool/tsh/common/db.go
index cf107f20dc8c2..83b126436792a 100644
--- a/tool/tsh/common/db.go
+++ b/tool/tsh/common/db.go
@@ -655,7 +655,6 @@ func maybeStartLocalProxy(ctx context.Context, cf *CLIConf,
 	host := "localhost"
 	cmdOpts := []dbcmd.ConnectCommandFunc{
 		dbcmd.WithLocalProxy(host, addr.Port(0), profile.CACertPathForCluster(rootClusterName)),
-		dbcmd.WithGetDatabaseFunc(dbInfo.getDatabaseForDBCmd),
 	}
 	if requires.tunnel {
 		cmdOpts = append(cmdOpts, dbcmd.WithNoTLS())
@@ -779,7 +778,10 @@ func onDatabaseConnect(cf *CLIConf) error {
 	if err != nil {
 		return trace.Wrap(err)
 	}
-	opts = append(opts, dbcmd.WithLogger(log))
+	opts = append(opts,
+		dbcmd.WithLogger(log),
+		dbcmd.WithGetDatabaseFunc(dbInfo.getDatabaseForDBCmd),
+	)
 
 	if opts, err = maybeAddDBUserPassword(cf, tc, dbInfo, opts); err != nil {
 		return trace.Wrap(err)
diff --git a/tool/tsh/common/tsh.go b/tool/tsh/common/tsh.go
index 2164dea15d3ea..1adabe7b337c1 100644
--- a/tool/tsh/common/tsh.go
+++ b/tool/tsh/common/tsh.go
@@ -118,6 +118,8 @@ const (
 	mfaModePlatform = "platform"
 	// mfaModeOTP utilizes only OTP devices.
 	mfaModeOTP = "otp"
+	// mfaModeSSO utilizes only SSO devices.
+	mfaModeSSO = "sso"
 )
 
 const (
@@ -766,7 +768,7 @@ func Run(ctx context.Context, args []string, opts ...CliOption) error {
 	app.Flag("bind-addr", "Override host:port used when opening a browser for cluster logins").Envar(bindAddrEnvVar).StringVar(&cf.BindAddr)
 	app.Flag("callback", "Override the base URL (host:port) of the link shown when opening a browser for cluster logins. Must be used with --bind-addr.").StringVar(&cf.CallbackAddr)
 	app.Flag("browser-login", browserHelp).Hidden().Envar(browserEnvVar).StringVar(&cf.Browser)
-	modes := []string{mfaModeAuto, mfaModeCrossPlatform, mfaModePlatform, mfaModeOTP}
+	modes := []string{mfaModeAuto, mfaModeCrossPlatform, mfaModePlatform, mfaModeOTP, mfaModeSSO}
 	app.Flag("mfa-mode", fmt.Sprintf("Preferred mode for MFA and Passwordless assertions (%v)", strings.Join(modes, ", "))).
 		Default(mfaModeAuto).
 		Envar(mfaModeEnvVar).
@@ -4253,6 +4255,7 @@ func loadClientConfigFromCLIConf(cf *CLIConf, proxy string) (*client.Config, err
 	}
 	c.AuthenticatorAttachment = mfaOpts.AuthenticatorAttachment
 	c.PreferOTP = mfaOpts.PreferOTP
+	c.PreferSSO = mfaOpts.PreferSSO
 
 	// If agent forwarding was specified on the command line enable it.
 	c.ForwardAgent = options.ForwardAgent
@@ -4434,6 +4437,7 @@ func (c *CLIConf) GetProfile() (*profile.Profile, error) {
 type mfaModeOpts struct {
 	AuthenticatorAttachment wancli.AuthenticatorAttachment
 	PreferOTP               bool
+	PreferSSO               bool
 }
 
 func parseMFAMode(mode string) (*mfaModeOpts, error) {
@@ -4446,6 +4450,8 @@ func parseMFAMode(mode string) (*mfaModeOpts, error) {
 		opts.AuthenticatorAttachment = wancli.AttachmentPlatform
 	case mfaModeOTP:
 		opts.PreferOTP = true
+	case mfaModeSSO:
+		opts.PreferSSO = true
 	default:
 		return nil, fmt.Errorf("invalid MFA mode: %q", mode)
 	}
diff --git a/web/packages/teleport/src/Discover/SelectResource/SelectResource.tsx b/web/packages/teleport/src/Discover/SelectResource/SelectResource.tsx
index 4af93b381df8b..cd6dbfda043cd 100644
--- a/web/packages/teleport/src/Discover/SelectResource/SelectResource.tsx
+++ b/web/packages/teleport/src/Discover/SelectResource/SelectResource.tsx
@@ -156,7 +156,7 @@ export function SelectResource({ onSelect }: SelectResourceProps) {
           Select Resource To Add
-          Teleport can integrate into most, if not all of your infrastructure.
+          Teleport can integrate into most, if not all, of your infrastructure.
           Search for what resource you want to add.
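Taken together, the tsh changes above let `tsh login --mfa-mode=sso` request the SSO-based MFA flow by setting Config.PreferSSO. A short test-style sketch of the parseMFAMode behavior added in this diff; the test name is illustrative, not part of the PR:

```go
func TestParseMFAModeSSO(t *testing.T) {
	opts, err := parseMFAMode(mfaModeSSO)
	if err != nil {
		t.Fatalf("parseMFAMode: %v", err)
	}
	if !opts.PreferSSO {
		t.Error("expected PreferSSO to be set for --mfa-mode=sso")
	}

	// Unknown modes are rejected with "invalid MFA mode".
	if _, err := parseMFAMode("bogus"); err == nil {
		t.Error("expected an error for an unknown MFA mode")
	}
}
```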