diff --git a/.github/workflows/vercel-preview.yaml b/.github/workflows/vercel-preview.yaml
index 74e4dbce38232..f74b3086138c0 100644
--- a/.github/workflows/vercel-preview.yaml
+++ b/.github/workflows/vercel-preview.yaml
@@ -61,5 +61,5 @@ jobs:
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
- body: `🤖 Vercel preview here: ${previewUrl}/docs/ver/preview`
+ body: `🤖 Vercel preview here: ${previewUrl}/docs`
})
diff --git a/build.assets/macos/tsh/tsh.app/Contents/Library/LaunchDaemons/com.gravitational.teleport.tsh.vnetd.plist b/build.assets/macos/tsh/tsh.app/Contents/Library/LaunchDaemons/com.gravitational.teleport.tsh.vnetd.plist
index 5bd6b0884a1ce..6ee39856cb2af 100644
--- a/build.assets/macos/tsh/tsh.app/Contents/Library/LaunchDaemons/com.gravitational.teleport.tsh.vnetd.plist
+++ b/build.assets/macos/tsh/tsh.app/Contents/Library/LaunchDaemons/com.gravitational.teleport.tsh.vnetd.plist
@@ -22,5 +22,12 @@
     <string>/var/log/vnet.log</string>
     <key>ThrottleInterval</key>
     <integer>5</integer>
+    <key>EnvironmentVariables</key>
+    <dict>
+        <key>TELEPORT_TOOLS_VERSION</key>
+        <string>off</string>
+    </dict>
diff --git a/build.assets/macos/tshdev/tsh.app/Contents/Library/LaunchDaemons/com.goteleport.tshdev.vnetd.plist b/build.assets/macos/tshdev/tsh.app/Contents/Library/LaunchDaemons/com.goteleport.tshdev.vnetd.plist
index 8b51b2a044191..90d6139ac96ad 100644
--- a/build.assets/macos/tshdev/tsh.app/Contents/Library/LaunchDaemons/com.goteleport.tshdev.vnetd.plist
+++ b/build.assets/macos/tshdev/tsh.app/Contents/Library/LaunchDaemons/com.goteleport.tshdev.vnetd.plist
@@ -22,5 +22,12 @@
     <string>/var/log/vnet.log</string>
     <key>ThrottleInterval</key>
     <integer>5</integer>
+    <key>EnvironmentVariables</key>
+    <dict>
+        <key>TELEPORT_TOOLS_VERSION</key>
+        <string>off</string>
+    </dict>
diff --git a/docs/pages/connect-your-client/gui-clients.mdx b/docs/pages/connect-your-client/gui-clients.mdx
index 30bc8ef8b727e..3403430765eea 100644
--- a/docs/pages/connect-your-client/gui-clients.mdx
+++ b/docs/pages/connect-your-client/gui-clients.mdx
@@ -10,9 +10,20 @@ work with Teleport.
### Prerequisites
-(!docs/pages/includes/edition-prereqs-tabs.mdx!)
+- A running Teleport cluster. If you want to get started with Teleport, [sign
+ up](https://goteleport.com/signup) for a free trial or [set up a demo
+ environment](../admin-guides/deploy-a-cluster/linux-demo.mdx).
+
+- The `tsh` client tool. Visit [Installation](../installation.mdx) for instructions on downloading
+ `tsh`. See the [Using Teleport Connect](./teleport-connect.mdx) guide for a graphical desktop client
+ that includes `tsh`.
+
+- To check that you can connect to your Teleport cluster, sign in with `tsh login`. For example:
+
+ ```code
+ $ tsh login --proxy= --user=
+ ```
-- (!docs/pages/includes/tctl.mdx!)
- The Teleport Database Service configured to access a database. See one of our
[guides](../enroll-resources/database-access/guides/guides.mdx) for how to set up the Teleport
Database Service for your database.
diff --git a/docs/pages/enroll-resources/desktop-access/dynamic-registration.mdx b/docs/pages/enroll-resources/desktop-access/dynamic-registration.mdx
new file mode 100644
index 0000000000000..b9e6bfff889c3
--- /dev/null
+++ b/docs/pages/enroll-resources/desktop-access/dynamic-registration.mdx
@@ -0,0 +1,147 @@
+---
+title: Dynamic Windows Desktop Registration
+description: Register/unregister Windows desktops without restarting Teleport.
+---
+
+Dynamic Windows desktop registration allows Teleport administrators to register
+new Windows desktops (or update/unregister existing ones) without having to
+update the static configuration files read by Teleport Windows Desktop Service
+instances.
+
+Windows Desktop Service instances watch for updates from the Teleport Auth
+Service for `dynamic_windows_desktop` resources, each of which includes the
+information that the Windows Desktop Service needs to connect to a Windows
+desktop.
+
+## Required permissions
+
+In order to interact with dynamically registered Windows desktops, a user must have
+a Teleport role with permissions to manage `dynamic_windows_desktop` resources.
+
+In the following example, a role allows a user to perform all possible
+operations against `dynamic_windows_desktop` resources:
+
+```yaml
+allow:
+ rules:
+ - resources: [dynamic_windows_desktop]
+ verbs: [list, create, read, update, delete]
+```
+
+## Enabling dynamic registration
+
+To enable dynamic registration, include a `resources` section in your Windows Desktop
+Service configuration with a list of label selectors for the dynamic Windows desktop
+resources you'd like this service to register:
+
+```yaml
+windows_desktop_service:
+ enabled: "yes"
+ resources:
+ - labels:
+ "*": "*"
+```
+
+You can use a wildcard selector to register all dynamic Windows desktop resources in the cluster
+with the Windows Desktop Service, or provide a specific set of labels to register a subset:
+
+```yaml
+resources:
+- labels:
+ "env": "prod"
+- labels:
+ "env": "test"
+```
+
+## Creating a dynamic_windows_desktop resource
+
+Configure Teleport to register a Windows desktop dynamically by creating a `dynamic_windows_desktop`
+resource. The following example configures Teleport to connect to a Windows desktop
+called `example` at `host1.example.com:3089`.
+
+```yaml
+kind: dynamic_windows_desktop
+version: v1
+metadata:
+ name: example
+ description: "Example desktop"
+ labels:
+ env: test
+spec:
+ addr: host1.example.com:3089
+ # non_ad should be true for logging in with local Windows users and false for Active Directory users
+ non_ad: true
+ # domain specifies the Active Directory domain for domain-joined machines
+ domain: ""
+
+ # Optional - ensures that all sessions use the same screen size,
+ # no matter what the size of the browser window is.
+ # Leave blank to use the size of the browser window.
+ screen_size:
+ width: 1024
+ height: 768
+```
+
+The user creating the dynamic Windows desktop must have a role with access
+to the Windows desktop labels and the `dynamic_windows_desktop` resource. In
+this example role, the user can only create and maintain dynamic Windows desktops
+labeled `env: test`:
+
+```yaml
+kind: role
+version: v7
+metadata:
+ name: example
+spec:
+ allow:
+ windows_desktop_labels:
+ env: test
+ rules:
+ - resources: [dynamic_windows_desktop]
+ verbs: [list, create, read, update, delete]
+```
+
+To create a dynamic Windows desktop resource, run:
+
+
+
+
+ ```code
+ # Log in to your cluster with tsh so you can use tctl from your local machine.
+ # You can also run tctl on your Auth Service host without running "tsh login"
+ # first.
+ $ tsh login --proxy=teleport.example.com --user=myuser
+ $ tctl create dynamic_windows_desktop.yaml
+ ```
+
+
+
+
+ ```code
+ # Log in to your Teleport cluster so you can use tctl remotely.
+ $ tsh login --proxy=mytenant.teleport.sh --user=myuser
+ $ tctl create dynamic_windows_desktop.yaml
+ ```
+
+
+
+
+
+After the resource has been created, it will appear in the list of available
+Windows desktops (for example, in the Web UI) as long as at least one Windows Desktop Service
+instance picks it up according to its label selectors.
+
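+To confirm that the desktop has been registered, you can fetch the resource with
+`tctl`. This assumes that `tctl get` supports the `dynamic_windows_desktop` kind
+in the same way that `tctl create` and `tctl rm` do:
+
+```code
+$ tctl get dynamic_windows_desktop/example
+```
+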
+To update an existing dynamic Windows desktop resource, run:
+
+```code
+$ tctl create -f dynamic_windows_desktop.yaml
+```
+
+If the updated resource's labels no longer match the selectors of a particular Windows
+Desktop Service, that service will unregister the desktop and stop routing traffic to it.
+
+To delete a dynamic Windows desktop resource, run:
+
+```code
+$ tctl rm dynamic_windows_desktop/example
+```
diff --git a/docs/pages/enroll-resources/desktop-access/introduction.mdx b/docs/pages/enroll-resources/desktop-access/introduction.mdx
index 93464b53a9324..38a9731f6c143 100644
--- a/docs/pages/enroll-resources/desktop-access/introduction.mdx
+++ b/docs/pages/enroll-resources/desktop-access/introduction.mdx
@@ -76,6 +76,7 @@ Windows-specific configuration settings, role-based permissions, and audit event
- [Role-Based Access Control for Desktops](./rbac.mdx)
- [Clipboard Sharing](../../reference/agent-services/desktop-access-reference/clipboard.mdx)
- [Directory Sharing](./directory-sharing.mdx)
+- [Dynamic Registration](./dynamic-registration.mdx)
- [Session Recording and Playback](../../reference/agent-services/desktop-access-reference/sessions.mdx)
- [Troubleshooting Desktop Access](./troubleshooting.mdx)
- [Desktop Access Audit Events Reference](../../reference/agent-services/desktop-access-reference/audit.mdx)
diff --git a/docs/pages/includes/config-reference/desktop-config.yaml b/docs/pages/includes/config-reference/desktop-config.yaml
index 8cc93c40be045..9afa984858495 100644
--- a/docs/pages/includes/config-reference/desktop-config.yaml
+++ b/docs/pages/includes/config-reference/desktop-config.yaml
@@ -109,7 +109,15 @@ windows_desktop_service:
# The key of the label will be "ldap/" + the value of the attribute.
label_attributes:
- location
- # Rules for applying labels to Windows hosts based on regular expressions
+
+ # (optional) configure a set of label selectors for dynamic registration.
+ # If specified, this service will monitor the cluster for dynamic_windows_desktop
+ # resources and automatically proxy connections for desktops with matching labels.
+ resources:
+ - labels:
+ "env": "dev"
+
+ # (optional) rules for applying labels to Windows hosts based on regular expressions
# matched against the host name. If multiple rules match, the desktop will
# get the union of all matching labels.
host_labels:
diff --git a/docs/pages/includes/helm-reference/zz_generated.tbot.mdx b/docs/pages/includes/helm-reference/zz_generated.tbot.mdx
index 036f40f8e6f8e..743030c0506b7 100644
--- a/docs/pages/includes/helm-reference/zz_generated.tbot.mdx
+++ b/docs/pages/includes/helm-reference/zz_generated.tbot.mdx
@@ -121,7 +121,7 @@ Ignored if `customConfig` is set.
| `string` | `"kubernetes"` |
`joinMethod` describes how tbot joins the Teleport cluster.
-See [the join method reference](../../join-methods.mdx) for a list fo supported values and detailed explanations.
+See [the join method reference](../../reference/join-methods.mdx) for a list of supported values and detailed explanations.
Ignored if `customConfig` is set.
## `token`
diff --git a/docs/pages/includes/role-spec.mdx b/docs/pages/includes/role-spec.mdx
index 25c566f3fae0e..2ac34fad5fd79 100644
--- a/docs/pages/includes/role-spec.mdx
+++ b/docs/pages/includes/role-spec.mdx
@@ -397,7 +397,7 @@ spec:
# generates a role name from the value capture
roles: ['$1-admin']
- # Teleport can attach annotations to pending Access Requests. these
+ # Teleport can attach annotations to pending Access Requests. These
# annotations may be literals, or be variable interpolation expressions,
# effectively creating a means for propagating selected claims from an
# external identity provider to the plugin system.
@@ -435,7 +435,7 @@ spec:
# The different session kinds this policy applies to.
kinds: ['k8s', 'ssh']
# The list of session participant modes the role may join the session as.
- modes: ['moderator', 'observer']
+ modes: ['moderator', 'observer', 'peer']
# spiffe is a list of SPIFFE IDs that the role holder is allowed to request
# SVIDs for. As long as the request matches one of the blocks within the
diff --git a/docs/pages/reference/cli/tctl.mdx b/docs/pages/reference/cli/tctl.mdx
index 1e86e7e6bb25d..e7f65fd3017c8 100644
--- a/docs/pages/reference/cli/tctl.mdx
+++ b/docs/pages/reference/cli/tctl.mdx
@@ -1730,11 +1730,9 @@ These flags are available for all commands `--debug, --config`. Run
### Examples
```code
-# Adds teleport user "joe" with mappings to
-# OS users and {{ internal.logins }} to "joe" and "ubuntu"
-$ tctl users add joe --roles=access,requester joe,ubuntu
-# Adds Teleport user "joe" with mappings to the editor role
-$ tctl users add joe --roles=editor,reviewer
+# Adds Teleport user "joe" with the "access" and "requester" roles and
+# permissions to assume the "joe" and "ubuntu" logins
+$ tctl users add joe --roles=access,requester --logins=joe,ubuntu
```
## tctl users ls
diff --git a/docs/pages/reference/helm-reference/helm-reference.mdx b/docs/pages/reference/helm-reference/helm-reference.mdx
index 61f9efaaf7d02..dad5fabc38c13 100644
--- a/docs/pages/reference/helm-reference/helm-reference.mdx
+++ b/docs/pages/reference/helm-reference/helm-reference.mdx
@@ -15,6 +15,7 @@ layout: tocless-doc
Teleport Kubernetes Operator.
- [teleport-access-graph](teleport-access-graph.mdx): Deploy the
Teleport Policy Access Graph service.
+- [tbot](tbot.mdx): Deploy an instance of TBot, the [Machine ID](../../enroll-resources/machine-id/introduction.mdx) agent.
- [teleport-plugin-event-handler](teleport-plugin-event-handler.mdx):
Deploy the Teleport Event Handler plugin which sends events and session logs
to Fluentd.
diff --git a/docs/pages/reference/helm-reference/tbot.mdx b/docs/pages/reference/helm-reference/tbot.mdx
new file mode 100644
index 0000000000000..e68ebfc5ae72a
--- /dev/null
+++ b/docs/pages/reference/helm-reference/tbot.mdx
@@ -0,0 +1,35 @@
+---
+title: tbot Chart Reference
+description: Values that can be set using the tbot Helm chart
+---
+
+This chart deploys an instance of the [Machine ID](../../enroll-resources/machine-id/introduction.mdx) agent,
+TBot, into your Kubernetes cluster.
+
+To use it, you will need to know:
+
+- The address of your Teleport Proxy Service or Auth Service
+- The name of your Teleport cluster
+- The name of a join token configured for Machine ID and your Kubernetes cluster
+ as described in the [Machine ID on Kubernetes guide](../../enroll-resources/machine-id/deployment/kubernetes.mdx)
+
+By default, this chart is designed to use the `kubernetes` join method, but it
+can be customized to use any delegated join method. We do not recommend that
+you use the `token` join method with this chart.
+
+## Minimal configuration
+
+This basic configuration will write a Teleport identity file to a secret named
+`test-output` in the namespace where the chart is deployed.
+
+```yaml
+clusterName: "test.teleport.sh"
+teleportProxyAddress: "test.teleport.sh:443"
+defaultOutput:
+ secretName: "test-output"
+token: "my-token"
+```
+
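+With these values saved as `values.yaml`, you can install the chart with commands
+along the following lines. The Helm repository alias, release name, and namespace
+are placeholders, and the chart is assumed to be published as `tbot` in the
+Teleport Helm repository:
+
+```code
+$ helm repo add teleport https://charts.releases.teleport.dev
+$ helm repo update
+$ helm install tbot teleport/tbot --create-namespace --namespace tbot --values values.yaml
+```
+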
+## Full reference
+
+(!docs/pages/includes/helm-reference/zz_generated.tbot.mdx!)
diff --git a/docs/pages/reference/resources.mdx b/docs/pages/reference/resources.mdx
index 90df80f6feefa..7705f6456d4c9 100644
--- a/docs/pages/reference/resources.mdx
+++ b/docs/pages/reference/resources.mdx
@@ -138,51 +138,26 @@ are required.*/}
|
kubernetes_groups:
- "system:masters"
kubernetes_labels:
env: ["dev"]
kubernetes_resources:
- kind: "namespace"
name: "foo"
| ⚠️ not supported | ⚠️ not supported | ✅ full access in namespace `foo`
❌ cannot access other namespaces |
-## Windows desktop
+## Windows desktops
In most cases, Teleport will register `windows_desktop` resources automatically
based on static hosts in your configuration file or via LDAP-based discovery.
-However, you can also manage `windows_desktop` resources manually with `tctl`.
-This can be useful for managing inventories of hosts that are not joined to
-an Active Directory domain.
+You can also use [dynamic
+registration](../enroll-resources/desktop-access/dynamic-registration.mdx) with
+`dynamic_windows_desktop` resources. This can be useful for managing inventories
+of hosts that are not joined to an Active Directory domain.
There are a few important considerations to keep in mind when registering
desktops this way:
1. The desktop's `addr` can be a hostname or IP address, and should include
the RDP port (typically 3389).
-1. The desktop's `host_id` should be set to the name of a
- `windows_desktop_service` that is capable of proxying remote desktop
- connections to the host. If you have multiple such services, you can create
- multiple `windows_desktop` resources with different `host_id` values.
1. If you intend to log in to the desktop with local Windows users you must set
`non_ad: true`. If you intend to log in with Active Directory users, leave
`non_ad` unset (or false), and specify the Active Directory domain in the
`domain` field.
-```yaml
-kind: windows_desktop
-metadata:
- name: desktop-without-ad
- labels:
- foo: bar
- baz: quux
-spec:
- host_id: 307e091b-7f6b-42e0-b78d-3362ad10b55d
- addr: 192.168.1.153:3389
- domain: ""
- non_ad: true
-
- # Optional - ensures that all sessions use the same screen size,
- # no matter what the size of the browser window is.
- # Leave blank to use the size of the browser window.
- screen_size:
- width: 1024
- height: 768
-version: v3
-```
-
## Login Rules
Login rules contain logic to transform SSO user traits during login.
@@ -191,7 +166,7 @@ Login rules contain logic to transform SSO user traits during login.
## Database object import rule
-Database object import rule define the labels to be applied to database objects imported into Teleport.
+Database object import rules define the labels to be applied to database objects imported into Teleport.
See [Database Access Controls](../enroll-resources/database-access/rbac.mdx) for more details.
diff --git a/examples/chart/tbot/values.yaml b/examples/chart/tbot/values.yaml
index 680fda9d1a7fb..bdc2670142648 100644
--- a/examples/chart/tbot/values.yaml
+++ b/examples/chart/tbot/values.yaml
@@ -70,7 +70,7 @@ outputs: []
services: []
# joinMethod(string) -- describes how tbot joins the Teleport cluster.
-# See [the join method reference](../../join-methods.mdx) for a list fo supported values and detailed explanations.
+# See [the join method reference](../../reference/join-methods.mdx) for a list of supported values and detailed explanations.
# Ignored if `customConfig` is set.
joinMethod: "kubernetes"
@@ -226,4 +226,4 @@ extraArgs: []
# - name: HTTPS_PROXY
# value: "http://username:password@my.proxy.host:3128"
# ```
-extraEnv: []
\ No newline at end of file
+extraEnv: []
diff --git a/integration/autoupdate/tools/updater/main.go b/integration/autoupdate/tools/updater/main.go
index 775c7ab7b2e9d..8a12dbbd1c9f7 100644
--- a/integration/autoupdate/tools/updater/main.go
+++ b/integration/autoupdate/tools/updater/main.go
@@ -43,7 +43,6 @@ func main() {
ctx, _ = signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM)
updater := tools.NewUpdater(
- tools.DefaultClientTools(),
toolsDir,
version,
tools.WithBaseURL(baseURL),
diff --git a/integration/autoupdate/tools/updater_test.go b/integration/autoupdate/tools/updater_test.go
index d429a7483910a..ffb3a3300716c 100644
--- a/integration/autoupdate/tools/updater_test.go
+++ b/integration/autoupdate/tools/updater_test.go
@@ -49,7 +49,6 @@ func TestUpdate(t *testing.T) {
// Fetch compiled test binary with updater logic and install to $TELEPORT_HOME.
updater := tools.NewUpdater(
- tools.DefaultClientTools(),
toolsDir,
testVersions[0],
tools.WithBaseURL(baseURL),
@@ -91,7 +90,6 @@ func TestParallelUpdate(t *testing.T) {
// Initial fetch the updater binary un-archive and replace.
updater := tools.NewUpdater(
- tools.DefaultClientTools(),
toolsDir,
testVersions[0],
tools.WithBaseURL(baseURL),
@@ -165,7 +163,6 @@ func TestUpdateInterruptSignal(t *testing.T) {
// Initial fetch the updater binary un-archive and replace.
updater := tools.NewUpdater(
- tools.DefaultClientTools(),
toolsDir,
testVersions[0],
tools.WithBaseURL(baseURL),
diff --git a/lib/auth/bot_test.go b/lib/auth/bot_test.go
index a9f5a0d2e2cee..5ff53115b5374 100644
--- a/lib/auth/bot_test.go
+++ b/lib/auth/bot_test.go
@@ -565,7 +565,7 @@ func TestRegisterBot_RemoteAddr(t *testing.T) {
t.Run("Azure method", func(t *testing.T) {
subID := uuid.NewString()
resourceGroup := "rg"
- rsID := resourceID(subID, resourceGroup, "test-vm")
+ rsID := vmResourceID(subID, resourceGroup, "test-vm")
vmID := "vmID"
accessToken, err := makeToken(rsID, a.clock.Now())
@@ -585,13 +585,20 @@ func TestRegisterBot_RemoteAddr(t *testing.T) {
require.NoError(t, err)
require.NoError(t, a.UpsertToken(ctx, azureToken))
- vmClient := &mockAzureVMClient{vm: &azure.VirtualMachine{
- ID: rsID,
- Name: "test-vm",
- Subscription: subID,
- ResourceGroup: resourceGroup,
- VMID: vmID,
- }}
+ vmClient := &mockAzureVMClient{
+ vms: map[string]*azure.VirtualMachine{
+ rsID: {
+ ID: rsID,
+ Name: "test-vm",
+ Subscription: subID,
+ ResourceGroup: resourceGroup,
+ VMID: vmID,
+ },
+ },
+ }
+ getVMClient := makeVMClientGetter(map[string]*mockAzureVMClient{
+ subID: vmClient,
+ })
tlsConfig, err := fixtures.LocalTLSConfig()
require.NoError(t, err)
@@ -633,7 +640,7 @@ func TestRegisterBot_RemoteAddr(t *testing.T) {
AccessToken: accessToken,
}
return req, nil
- }, withCerts([]*x509.Certificate{tlsConfig.Certificate}), withVerifyFunc(mockVerifyToken(nil)), withVMClient(vmClient))
+ }, withCerts([]*x509.Certificate{tlsConfig.Certificate}), withVerifyFunc(mockVerifyToken(nil)), withVMClientGetter(getVMClient))
require.NoError(t, err)
checkCertLoginIP(t, certs.TLS, remoteAddr)
})
diff --git a/lib/auth/join_azure.go b/lib/auth/join_azure.go
index 9cee8259dd4c2..428ac15c90679 100644
--- a/lib/auth/join_azure.go
+++ b/lib/auth/join_azure.go
@@ -83,11 +83,13 @@ type accessTokenClaims struct {
type azureVerifyTokenFunc func(ctx context.Context, rawIDToken string) (*accessTokenClaims, error)
+type vmClientGetter func(subscriptionID string, token *azure.StaticCredential) (azure.VirtualMachinesClient, error)
+
type azureRegisterConfig struct {
clock clockwork.Clock
certificateAuthorities []*x509.Certificate
verify azureVerifyTokenFunc
- vmClient azure.VirtualMachinesClient
+ getVMClient vmClientGetter
}
func azureVerifyFuncFromOIDCVerifier(cfg *oidc.Config) azureVerifyTokenFunc {
@@ -140,6 +142,12 @@ func (cfg *azureRegisterConfig) CheckAndSetDefaults(ctx context.Context) error {
}
cfg.certificateAuthorities = certs
}
+ if cfg.getVMClient == nil {
+ cfg.getVMClient = func(subscriptionID string, token *azure.StaticCredential) (azure.VirtualMachinesClient, error) {
+ client, err := azure.NewVirtualMachinesClient(subscriptionID, token, nil)
+ return client, trace.Wrap(err)
+ }
+ }
return nil
}
@@ -148,42 +156,42 @@ type azureRegisterOption func(cfg *azureRegisterConfig)
// parseAndVeryAttestedData verifies that an attested data document was signed
// by Azure. If verification is successful, it returns the ID of the VM that
// produced the document.
-func parseAndVerifyAttestedData(ctx context.Context, adBytes []byte, challenge string, certs []*x509.Certificate) (string, error) {
+func parseAndVerifyAttestedData(ctx context.Context, adBytes []byte, challenge string, certs []*x509.Certificate) (subscriptionID, vmID string, err error) {
var signedAD signedAttestedData
if err := utils.FastUnmarshal(adBytes, &signedAD); err != nil {
- return "", trace.Wrap(err)
+ return "", "", trace.Wrap(err)
}
if signedAD.Encoding != "pkcs7" {
- return "", trace.AccessDenied("unsupported signature type: %v", signedAD.Encoding)
+ return "", "", trace.AccessDenied("unsupported signature type: %v", signedAD.Encoding)
}
sigPEM := "-----BEGIN PKCS7-----\n" + signedAD.Signature + "\n-----END PKCS7-----"
sigBER, _ := pem.Decode([]byte(sigPEM))
if sigBER == nil {
- return "", trace.AccessDenied("unable to decode attested data document")
+ return "", "", trace.AccessDenied("unable to decode attested data document")
}
p7, err := pkcs7.Parse(sigBER.Bytes)
if err != nil {
- return "", trace.Wrap(err)
+ return "", "", trace.Wrap(err)
}
var ad attestedData
if err := utils.FastUnmarshal(p7.Content, &ad); err != nil {
- return "", trace.Wrap(err)
+ return "", "", trace.Wrap(err)
}
if ad.Nonce != challenge {
- return "", trace.AccessDenied("challenge is missing or does not match")
+ return "", "", trace.AccessDenied("challenge is missing or does not match")
}
if len(p7.Certificates) == 0 {
- return "", trace.AccessDenied("no certificates for signature")
+ return "", "", trace.AccessDenied("no certificates for signature")
}
fixAzureSigningAlgorithm(p7)
// Azure only sends the leaf cert, so we have to fetch the intermediate.
intermediate, err := getAzureIssuerCert(ctx, p7.Certificates[0])
if err != nil {
- return "", trace.Wrap(err)
+ return "", "", trace.Wrap(err)
}
if intermediate != nil {
p7.Certificates = append(p7.Certificates, intermediate)
@@ -195,15 +203,15 @@ func parseAndVerifyAttestedData(ctx context.Context, adBytes []byte, challenge s
}
if err := p7.VerifyWithChain(pool); err != nil {
- return "", trace.Wrap(err)
+ return "", "", trace.Wrap(err)
}
- return ad.ID, nil
+ return ad.SubscriptionID, ad.ID, nil
}
// verifyVMIdentity verifies that the provided access token came from the
// correct Azure VM.
-func verifyVMIdentity(ctx context.Context, cfg *azureRegisterConfig, accessToken, vmID string, requestStart time.Time) (*azure.VirtualMachine, error) {
+func verifyVMIdentity(ctx context.Context, cfg *azureRegisterConfig, accessToken, subscriptionID, vmID string, requestStart time.Time) (*azure.VirtualMachine, error) {
tokenClaims, err := cfg.verify(ctx, accessToken)
if err != nil {
return nil, trace.Wrap(err)
@@ -231,24 +239,15 @@ func verifyVMIdentity(ctx context.Context, cfg *azureRegisterConfig, accessToken
return nil, trace.Wrap(err)
}
- rsID, err := arm.ParseResourceID(tokenClaims.ResourceID)
+ tokenCredential := azure.NewStaticCredential(azcore.AccessToken{
+ Token: accessToken,
+ ExpiresOn: tokenClaims.Expiry.Time(),
+ })
+ vmClient, err := cfg.getVMClient(subscriptionID, tokenCredential)
if err != nil {
return nil, trace.Wrap(err)
}
- vmClient := cfg.vmClient
- if vmClient == nil {
- tokenCredential := azure.NewStaticCredential(azcore.AccessToken{
- Token: accessToken,
- ExpiresOn: tokenClaims.Expiry.Time(),
- })
- var err error
- vmClient, err = azure.NewVirtualMachinesClient(rsID.SubscriptionID, tokenCredential, nil)
- if err != nil {
- return nil, trace.Wrap(err)
- }
- }
-
resourceID, err := arm.ParseResourceID(tokenClaims.ResourceID)
if err != nil {
return nil, trace.Wrap(err)
@@ -324,12 +323,12 @@ func (a *Server) checkAzureRequest(ctx context.Context, challenge string, req *p
return trace.AccessDenied("this token does not support the Azure join method")
}
- vmID, err := parseAndVerifyAttestedData(ctx, req.AttestedData, challenge, cfg.certificateAuthorities)
+ subID, vmID, err := parseAndVerifyAttestedData(ctx, req.AttestedData, challenge, cfg.certificateAuthorities)
if err != nil {
return trace.Wrap(err)
}
- vm, err := verifyVMIdentity(ctx, cfg, req.AccessToken, vmID, requestStart)
+ vm, err := verifyVMIdentity(ctx, cfg, req.AccessToken, subID, vmID, requestStart)
if err != nil {
return trace.Wrap(err)
}
diff --git a/lib/auth/join_azure_test.go b/lib/auth/join_azure_test.go
index 1e8af282de7ef..54a105fe0f0cf 100644
--- a/lib/auth/join_azure_test.go
+++ b/lib/auth/join_azure_test.go
@@ -54,23 +54,41 @@ func withVerifyFunc(verify azureVerifyTokenFunc) azureRegisterOption {
}
}
-func withVMClient(vmClient azure.VirtualMachinesClient) azureRegisterOption {
+func withVMClientGetter(getVMClient vmClientGetter) azureRegisterOption {
return func(cfg *azureRegisterConfig) {
- cfg.vmClient = vmClient
+ cfg.getVMClient = getVMClient
}
}
type mockAzureVMClient struct {
azure.VirtualMachinesClient
- vm *azure.VirtualMachine
+ vms map[string]*azure.VirtualMachine
}
-func (m *mockAzureVMClient) Get(_ context.Context, _ string) (*azure.VirtualMachine, error) {
- return m.vm, nil
+func (m *mockAzureVMClient) Get(_ context.Context, resourceID string) (*azure.VirtualMachine, error) {
+ vm, ok := m.vms[resourceID]
+ if !ok {
+ return nil, trace.NotFound("no vm with resource id %q", resourceID)
+ }
+ return vm, nil
+}
+
+func (m *mockAzureVMClient) GetByVMID(_ context.Context, resourceGroup, vmID string) (*azure.VirtualMachine, error) {
+ for _, vm := range m.vms {
+ if vm.VMID == vmID && (resourceGroup == types.Wildcard || vm.ResourceGroup == resourceGroup) {
+ return vm, nil
+ }
+ }
+ return nil, trace.NotFound("no vm in groups %q with id %q", resourceGroup, vmID)
}
-func (m *mockAzureVMClient) GetByVMID(_ context.Context, _, _ string) (*azure.VirtualMachine, error) {
- return m.vm, nil
+func makeVMClientGetter(clients map[string]*mockAzureVMClient) vmClientGetter {
+ return func(subscriptionID string, _ *azure.StaticCredential) (azure.VirtualMachinesClient, error) {
+ if client, ok := clients[subscriptionID]; ok {
+ return client, nil
+ }
+ return nil, trace.NotFound("no client for subscription %q", subscriptionID)
+ }
}
type azureChallengeResponseConfig struct {
@@ -85,10 +103,14 @@ func withChallengeAzure(challenge string) azureChallengeResponseOption {
}
}
-func resourceID(subscription, resourceGroup, name string) string {
+func vmResourceID(subscription, resourceGroup, name string) string {
+ return resourceID("virtualMachines", subscription, resourceGroup, name)
+}
+
+func resourceID(resourceType, subscription, resourceGroup, name string) string {
return fmt.Sprintf(
- "/subscriptions/%v/resourcegroups/%v/providers/Microsoft.Compute/virtualMachines/%v",
- subscription, resourceGroup, name,
+ "/subscriptions/%v/resourcegroups/%v/providers/Microsoft.Compute/%v/%v",
+ subscription, resourceGroup, resourceType, name,
)
}
@@ -161,43 +183,47 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
tlsPublicKey, err := PrivateKeyToPublicKeyTLS(sshPrivateKey)
require.NoError(t, err)
- isAccessDenied := func(t require.TestingT, err error, _ ...interface{}) {
+ isAccessDenied := func(t require.TestingT, err error, _ ...any) {
require.True(t, trace.IsAccessDenied(err), "expected Access Denied error, actual error: %v", err)
}
- isBadParameter := func(t require.TestingT, err error, _ ...interface{}) {
+ isBadParameter := func(t require.TestingT, err error, _ ...any) {
require.True(t, trace.IsBadParameter(err), "expected Bad Parameter error, actual error: %v", err)
}
+ isNotFound := func(t require.TestingT, err error, _ ...any) {
+ require.True(t, trace.IsNotFound(err), "expected Not Found error, actual error: %v", err)
+ }
- subID := uuid.NewString()
+ defaultSubscription := uuid.NewString()
+ defaultResourceGroup := "my-resource-group"
+ defaultName := "test-vm"
+ defaultVMID := "my-vm-id"
+ defaultResourceID := vmResourceID(defaultSubscription, defaultResourceGroup, defaultName)
tests := []struct {
name string
- subscription string
- resourceGroup string
- vmID string
- tokenName string
+ tokenResourceID string
+ tokenSubscription string
+ tokenVMID string
requestTokenName string
tokenSpec types.ProvisionTokenSpecV2
challengeResponseOptions []azureChallengeResponseOption
challengeResponseErr error
certs []*x509.Certificate
verify azureVerifyTokenFunc
- vmResult *azure.VirtualMachine
assertError require.ErrorAssertionFunc
}{
{
- name: "basic passing case",
- tokenName: "test-token",
- requestTokenName: "test-token",
- subscription: subID,
- resourceGroup: "RG",
+ name: "basic passing case",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
tokenSpec: types.ProvisionTokenSpecV2{
Roles: []types.SystemRole{types.RoleNode},
Azure: &types.ProvisionTokenSpecV2Azure{
Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
{
- Subscription: subID,
- ResourceGroups: []string{"rg"},
+ Subscription: defaultSubscription,
+ ResourceGroups: []string{defaultResourceGroup},
},
},
},
@@ -208,18 +234,17 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
assertError: require.NoError,
},
{
- name: "resource group is case insensitive",
- tokenName: "test-token",
- requestTokenName: "test-token",
- subscription: subID,
- resourceGroup: "my-RESOURCE-GROUP",
+ name: "resource group is case insensitive",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
tokenSpec: types.ProvisionTokenSpecV2{
Roles: []types.SystemRole{types.RoleNode},
Azure: &types.ProvisionTokenSpecV2Azure{
Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
{
- Subscription: subID,
- ResourceGroups: []string{"MY-resource-group"},
+ Subscription: defaultSubscription,
+ ResourceGroups: []string{"MY-resource-GROUP"},
},
},
},
@@ -230,17 +255,16 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
assertError: require.NoError,
},
{
- name: "wrong token",
- tokenName: "test-token",
- requestTokenName: "wrong-token",
- subscription: subID,
- resourceGroup: "RG",
+ name: "wrong token",
+ requestTokenName: "wrong-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
tokenSpec: types.ProvisionTokenSpecV2{
Roles: []types.SystemRole{types.RoleNode},
Azure: &types.ProvisionTokenSpecV2Azure{
Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
{
- Subscription: subID,
+ Subscription: defaultSubscription,
},
},
},
@@ -251,17 +275,16 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
assertError: isAccessDenied,
},
{
- name: "challenge response error",
- tokenName: "test-token",
- requestTokenName: "test-token",
- subscription: subID,
- resourceGroup: "RG",
+ name: "challenge response error",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
tokenSpec: types.ProvisionTokenSpecV2{
Roles: []types.SystemRole{types.RoleNode},
Azure: &types.ProvisionTokenSpecV2Azure{
Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
{
- Subscription: subID,
+ Subscription: defaultSubscription,
},
},
},
@@ -273,17 +296,16 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
assertError: isBadParameter,
},
{
- name: "wrong subscription",
- tokenName: "test-token",
- requestTokenName: "test-token",
- subscription: "some-junk",
- resourceGroup: "RG",
+ name: "wrong subscription",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
tokenSpec: types.ProvisionTokenSpecV2{
Roles: []types.SystemRole{types.RoleNode},
Azure: &types.ProvisionTokenSpecV2Azure{
Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
{
- Subscription: subID,
+ Subscription: "alternate-subscription-id",
},
},
},
@@ -294,18 +316,17 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
assertError: isAccessDenied,
},
{
- name: "wrong resource group",
- tokenName: "test-token",
- requestTokenName: "test-token",
- subscription: subID,
- resourceGroup: "WRONG-RG",
+ name: "wrong resource group",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
tokenSpec: types.ProvisionTokenSpecV2{
Roles: []types.SystemRole{types.RoleNode},
Azure: &types.ProvisionTokenSpecV2Azure{
Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
{
- Subscription: subID,
- ResourceGroups: []string{"rg"},
+ Subscription: defaultSubscription,
+ ResourceGroups: []string{"alternate-resource-group"},
},
},
},
@@ -316,17 +337,16 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
assertError: isAccessDenied,
},
{
- name: "wrong challenge",
- tokenName: "test-token",
- requestTokenName: "test-token",
- subscription: subID,
- resourceGroup: "RG",
+ name: "wrong challenge",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
tokenSpec: types.ProvisionTokenSpecV2{
Roles: []types.SystemRole{types.RoleNode},
Azure: &types.ProvisionTokenSpecV2Azure{
Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
{
- Subscription: subID,
+ Subscription: defaultSubscription,
},
},
},
@@ -340,17 +360,16 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
assertError: isAccessDenied,
},
{
- name: "invalid signature",
- tokenName: "test-token",
- requestTokenName: "test-token",
- subscription: subID,
- resourceGroup: "RG",
+ name: "invalid signature",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
tokenSpec: types.ProvisionTokenSpecV2{
Roles: []types.SystemRole{types.RoleNode},
Azure: &types.ProvisionTokenSpecV2Azure{
Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
{
- Subscription: subID,
+ Subscription: defaultSubscription,
},
},
},
@@ -361,38 +380,94 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
assertError: require.Error,
},
{
- name: "attested data and access token from different VMs",
- tokenName: "test-token",
- requestTokenName: "test-token",
- subscription: subID,
- resourceGroup: "RG",
- vmID: "vm-id",
+ name: "attested data and access token from different VMs",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: "some-other-vm-id",
tokenSpec: types.ProvisionTokenSpecV2{
Roles: []types.SystemRole{types.RoleNode},
Azure: &types.ProvisionTokenSpecV2Azure{
Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
{
- Subscription: subID,
+ Subscription: defaultSubscription,
},
},
},
JoinMethod: types.JoinMethodAzure,
},
- vmResult: &azure.VirtualMachine{
- Subscription: subID,
- ResourceGroup: "RG",
- VMID: "different-id",
- },
verify: mockVerifyToken(nil),
certs: []*x509.Certificate{tlsConfig.Certificate},
assertError: isAccessDenied,
},
+ {
+ name: "vm not found",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
+ tokenResourceID: vmResourceID(defaultSubscription, "nonexistent-group", defaultName),
+ tokenSpec: types.ProvisionTokenSpecV2{
+ Roles: []types.SystemRole{types.RoleNode},
+ Azure: &types.ProvisionTokenSpecV2Azure{
+ Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
+ {
+ Subscription: defaultSubscription,
+ },
+ },
+ },
+ JoinMethod: types.JoinMethodAzure,
+ },
+ verify: mockVerifyToken(nil),
+ certs: []*x509.Certificate{tlsConfig.Certificate},
+ assertError: isNotFound,
+ },
+ {
+ name: "lookup vm by id",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
+ tokenResourceID: resourceID("some.other.provider", defaultSubscription, defaultResourceGroup, defaultName),
+ tokenSpec: types.ProvisionTokenSpecV2{
+ Roles: []types.SystemRole{types.RoleNode},
+ Azure: &types.ProvisionTokenSpecV2Azure{
+ Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
+ {
+ Subscription: defaultSubscription,
+ },
+ },
+ },
+ JoinMethod: types.JoinMethodAzure,
+ },
+ verify: mockVerifyToken(nil),
+ certs: []*x509.Certificate{tlsConfig.Certificate},
+ assertError: require.NoError,
+ },
+ {
+ name: "vm is in a different subscription than the token it provides",
+ requestTokenName: "test-token",
+ tokenSubscription: defaultSubscription,
+ tokenVMID: defaultVMID,
+ tokenResourceID: resourceID("some.other.provider", "some-other-subscription", defaultResourceGroup, defaultName),
+ tokenSpec: types.ProvisionTokenSpecV2{
+ Roles: []types.SystemRole{types.RoleNode},
+ Azure: &types.ProvisionTokenSpecV2Azure{
+ Allow: []*types.ProvisionTokenSpecV2Azure_Rule{
+ {
+ Subscription: defaultSubscription,
+ },
+ },
+ },
+ JoinMethod: types.JoinMethodAzure,
+ },
+ verify: mockVerifyToken(nil),
+ certs: []*x509.Certificate{tlsConfig.Certificate},
+ assertError: require.NoError,
+ },
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
token, err := types.NewProvisionTokenFromSpec(
- tc.tokenName,
+ "test-token",
time.Now().Add(time.Minute),
tc.tokenSpec)
require.NoError(t, err)
@@ -401,23 +476,28 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
require.NoError(t, a.DeleteToken(ctx, token.GetName()))
})
- rsID := resourceID(tc.subscription, tc.resourceGroup, "test-vm")
+ rsID := tc.tokenResourceID
+ if rsID == "" {
+ rsID = vmResourceID(defaultSubscription, defaultResourceGroup, defaultName)
+ }
accessToken, err := makeToken(rsID, a.clock.Now())
require.NoError(t, err)
- vmResult := tc.vmResult
- if vmResult == nil {
- vmResult = &azure.VirtualMachine{
- ID: rsID,
- Name: "test-vm",
- Subscription: tc.subscription,
- ResourceGroup: tc.resourceGroup,
- VMID: tc.vmID,
- }
+ vmClient := &mockAzureVMClient{
+ vms: map[string]*azure.VirtualMachine{
+ defaultResourceID: {
+ ID: defaultResourceID,
+ Name: defaultName,
+ Subscription: defaultSubscription,
+ ResourceGroup: defaultResourceGroup,
+ VMID: defaultVMID,
+ },
+ },
}
-
- vmClient := &mockAzureVMClient{vm: vmResult}
+ getVMClient := makeVMClientGetter(map[string]*mockAzureVMClient{
+ defaultSubscription: vmClient,
+ })
_, err = a.RegisterUsingAzureMethodWithOpts(context.Background(), func(challenge string) (*proto.RegisterUsingAzureMethodRequest, error) {
cfg := &azureChallengeResponseConfig{Challenge: challenge}
@@ -427,8 +507,8 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
ad := attestedData{
Nonce: cfg.Challenge,
- SubscriptionID: subID,
- ID: tc.vmID,
+ SubscriptionID: tc.tokenSubscription,
+ ID: tc.tokenVMID,
}
adBytes, err := json.Marshal(&ad)
require.NoError(t, err)
@@ -456,7 +536,7 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) {
AccessToken: accessToken,
}
return req, tc.challengeResponseErr
- }, withCerts(tc.certs), withVerifyFunc(tc.verify), withVMClient(vmClient))
+ }, withCerts(tc.certs), withVerifyFunc(tc.verify), withVMClientGetter(getVMClient))
tc.assertError(t, err)
})
}
diff --git a/lib/autoupdate/tools/updater.go b/lib/autoupdate/tools/updater.go
index 96352e34d9910..8bad07c395391 100644
--- a/lib/autoupdate/tools/updater.go
+++ b/lib/autoupdate/tools/updater.go
@@ -82,6 +82,13 @@ func WithClient(client *http.Client) Option {
}
}
+// WithTools defines a custom list of the client tools to be installed by the updater.
+func WithTools(tools []string) Option {
+ return func(u *Updater) {
+ u.tools = tools
+ }
+}
+
// Updater is updater implementation for the client tools auto updates.
type Updater struct {
toolsDir string
@@ -92,13 +99,14 @@ type Updater struct {
client *http.Client
}
-// NewUpdater initializes the updater for client tools auto updates. We need to specify the list
-// of tools (e.g., `tsh`, `tctl`) that should be updated, the tools directory path where we
-// download, extract package archives with the new version, and replace symlinks (e.g., `$TELEPORT_HOME/bin`).
-// The base URL of the CDN with Teleport packages and the `http.Client` can be customized via options.
-func NewUpdater(tools []string, toolsDir string, localVersion string, options ...Option) *Updater {
+// NewUpdater initializes the updater for client tools auto updates. We need to specify the tools directory
+// path where we download and extract package archives with the new version and replace symlinks
+// (e.g., `$TELEPORT_HOME/bin`).
+// The base URL of the CDN with Teleport packages, the `http.Client`, and the list of tools (e.g., `tsh`, `tctl`)
+// that must be updated can be customized via options.
+func NewUpdater(toolsDir, localVersion string, options ...Option) *Updater {
updater := &Updater{
- tools: tools,
+ tools: DefaultClientTools(),
toolsDir: toolsDir,
localVersion: localVersion,
baseURL: baseURL,
diff --git a/lib/client/api.go b/lib/client/api.go
index ef72a5fb574ee..012875f09b73c 100644
--- a/lib/client/api.go
+++ b/lib/client/api.go
@@ -74,7 +74,6 @@ import (
"github.com/gravitational/teleport/lib/auth/touchid"
wancli "github.com/gravitational/teleport/lib/auth/webauthncli"
"github.com/gravitational/teleport/lib/authz"
- "github.com/gravitational/teleport/lib/autoupdate/tools"
libmfa "github.com/gravitational/teleport/lib/client/mfa"
"github.com/gravitational/teleport/lib/client/sso"
"github.com/gravitational/teleport/lib/client/terminal"
@@ -96,7 +95,7 @@ import (
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/lib/utils/agentconn"
logutils "github.com/gravitational/teleport/lib/utils/log"
- "github.com/gravitational/teleport/lib/utils/signal"
+ "github.com/gravitational/teleport/tool/common/updater"
)
const (
@@ -710,38 +709,9 @@ func RetryWithRelogin(ctx context.Context, tc *TeleportClient, fn func() error,
return trace.Wrap(err)
}
- // The user has typed a command like `tsh ssh ...` without being logged in,
- // if the running binary needs to be updated, update and re-exec.
- //
- // If needed, download the new version of {tsh, tctl} and re-exec. Make
- // sure to exit this process with the same exit code as the child process.
- //
- toolsDir, err := tools.Dir()
- if err != nil {
+ if err := updater.CheckAndUpdateRemote(ctx, teleport.Version, tc.WebProxyAddr, tc.InsecureSkipVerify); err != nil {
return trace.Wrap(err)
}
- updater := tools.NewUpdater(tools.DefaultClientTools(), toolsDir, teleport.Version)
- toolsVersion, reExec, err := updater.CheckRemote(ctx, tc.WebProxyAddr, tc.InsecureSkipVerify)
- if err != nil {
- return trace.Wrap(err)
- }
- if reExec {
- ctxUpdate, cancel := signal.GetSignalHandler().NotifyContext(context.Background())
- defer cancel()
- // Download the version of client tools required by the cluster.
- err := updater.UpdateWithLock(ctxUpdate, toolsVersion)
- if err != nil && !errors.Is(err, context.Canceled) {
- utils.FatalError(err)
- }
- // Re-execute client tools with the correct version of client tools.
- code, err := updater.Exec()
- if err != nil && !errors.Is(err, os.ErrNotExist) {
- log.Debugf("Failed to re-exec client tool: %v.", err)
- os.Exit(code)
- } else if err == nil {
- os.Exit(code)
- }
- }
if opt.afterLoginHook != nil {
if err := opt.afterLoginHook(); err != nil {
diff --git a/lib/events/filelog.go b/lib/events/filelog.go
index 8b0cf97b8602b..d6c39bce40b93 100644
--- a/lib/events/filelog.go
+++ b/lib/events/filelog.go
@@ -345,7 +345,11 @@ func getCheckpointFromEvent(event apievents.AuditEvent) (string, error) {
}
func (l *FileLog) SearchSessionEvents(ctx context.Context, req SearchSessionEventsRequest) ([]apievents.AuditEvent, string, error) {
- l.logger.DebugContext(ctx, "SearchSessionEvents", "from", req.From, "to", req.To, "order", req.Order, "limit", req.Limit, "cond", logutils.StringerAttr(req.Cond))
+ var whereExp types.WhereExpr
+ if req.Cond != nil {
+ whereExp = *req.Cond
+ }
+ l.logger.DebugContext(ctx, "SearchSessionEvents", "from", req.From, "to", req.To, "order", req.Order, "limit", req.Limit, "cond", logutils.StringerAttr(whereExp))
filter := searchEventsFilter{eventTypes: SessionRecordingEvents}
if req.Cond != nil {
condFn, err := utils.ToFieldsCondition(req.Cond)
diff --git a/lib/services/local/connection_diagnostic.go b/lib/services/local/connection_diagnostic.go
index 645318152b4c3..5ae2c11eb649d 100644
--- a/lib/services/local/connection_diagnostic.go
+++ b/lib/services/local/connection_diagnostic.go
@@ -85,7 +85,6 @@ func (s *ConnectionDiagnosticService) UpdateConnectionDiagnostic(ctx context.Con
}
// AppendDiagnosticTrace adds a Trace into the ConnectionDiagnostics.
-// It does a CompareAndSwap to ensure atomicity.
func (s *ConnectionDiagnosticService) AppendDiagnosticTrace(ctx context.Context, name string, t *types.ConnectionDiagnosticTrace) (types.ConnectionDiagnostic, error) {
existing, err := s.Get(ctx, backend.NewKey(connectionDiagnosticPrefix, name))
if err != nil {
@@ -115,7 +114,7 @@ func (s *ConnectionDiagnosticService) AppendDiagnosticTrace(ctx context.Context,
Revision: existing.Revision,
}
- _, err = s.CompareAndSwap(ctx, *existing, newItem)
+ _, err = s.ConditionalUpdate(ctx, newItem)
if err != nil {
return nil, trace.Wrap(err)
}
diff --git a/lib/services/local/dynamic_access.go b/lib/services/local/dynamic_access.go
index ff037e0cb81d8..e11aa5a51b16a 100644
--- a/lib/services/local/dynamic_access.go
+++ b/lib/services/local/dynamic_access.go
@@ -135,7 +135,7 @@ func (s *DynamicAccessService) SetAccessRequestState(ctx context.Context, params
if err != nil {
return nil, trace.Wrap(err)
}
- if _, err := s.CompareAndSwap(ctx, *item, newItem); err != nil {
+ if _, err := s.ConditionalUpdate(ctx, newItem); err != nil {
if trace.IsCompareFailed(err) {
select {
case <-retry.After():
@@ -195,7 +195,7 @@ func (s *DynamicAccessService) ApplyAccessReview(ctx context.Context, params typ
if err != nil {
return nil, trace.Wrap(err)
}
- if _, err := s.CompareAndSwap(ctx, *item, newItem); err != nil {
+ if _, err := s.ConditionalUpdate(ctx, newItem); err != nil {
if trace.IsCompareFailed(err) {
select {
case <-retry.After():
@@ -411,10 +411,8 @@ func (s *DynamicAccessService) CreateAccessRequestAllowedPromotions(ctx context.
if err != nil {
return trace.Wrap(err)
}
- // Currently, this logic is used only internally (no API exposed), and
- // there is only one place that calls it. If this ever changes, we will
- // need to do a CompareAndSwap here.
- if _, err := s.Put(ctx, item); err != nil {
+
+ if _, err := s.Create(ctx, item); err != nil {
return trace.Wrap(err)
}
return nil
diff --git a/lib/services/local/plugin_data.go b/lib/services/local/plugin_data.go
index 1ae324e1f5a8b..313de79ef206b 100644
--- a/lib/services/local/plugin_data.go
+++ b/lib/services/local/plugin_data.go
@@ -198,7 +198,7 @@ func (p *PluginDataService) updatePluginData(ctx context.Context, params types.P
return trace.Wrap(err)
}
} else {
- if _, err := p.CompareAndSwap(ctx, *item, newItem); err != nil {
+ if _, err := p.ConditionalUpdate(ctx, newItem); err != nil {
if trace.IsCompareFailed(err) {
select {
case <-retry.After():
diff --git a/lib/services/local/plugins.go b/lib/services/local/plugins.go
index 4f2bcfe7a1fa2..10a502e5382a7 100644
--- a/lib/services/local/plugins.go
+++ b/lib/services/local/plugins.go
@@ -246,7 +246,7 @@ func (s *PluginsService) updateAndSwap(ctx context.Context, name string, modify
return trace.Wrap(err)
}
- _, err = s.backend.CompareAndSwap(ctx, *item, backend.Item{
+ _, err = s.backend.ConditionalUpdate(ctx, backend.Item{
Key: backend.NewKey(pluginsPrefix, plugin.GetName()),
Value: value,
Expires: plugin.Expiry(),
diff --git a/lib/services/local/presence.go b/lib/services/local/presence.go
index 28525d1cde113..9ce5353e00aac 100644
--- a/lib/services/local/presence.go
+++ b/lib/services/local/presence.go
@@ -686,7 +686,7 @@ func (s *PresenceService) acquireSemaphore(ctx context.Context, key backend.Key,
if err != nil {
return nil, trace.Wrap(err)
}
- sem, err := services.UnmarshalSemaphore(item.Value)
+ sem, err := services.UnmarshalSemaphore(item.Value, services.WithRevision(item.Revision))
if err != nil {
return nil, trace.Wrap(err)
}
@@ -711,7 +711,7 @@ func (s *PresenceService) acquireSemaphore(ctx context.Context, key backend.Key,
Revision: rev,
}
- if _, err := s.CompareAndSwap(ctx, *item, newItem); err != nil {
+ if _, err := s.ConditionalUpdate(ctx, newItem); err != nil {
return nil, trace.Wrap(err)
}
return lease, nil
@@ -737,7 +737,7 @@ func (s *PresenceService) KeepAliveSemaphoreLease(ctx context.Context, lease typ
return trace.Wrap(err)
}
- sem, err := services.UnmarshalSemaphore(item.Value)
+ sem, err := services.UnmarshalSemaphore(item.Value, services.WithRevision(item.Revision))
if err != nil {
return trace.Wrap(err)
}
@@ -761,7 +761,7 @@ func (s *PresenceService) KeepAliveSemaphoreLease(ctx context.Context, lease typ
Revision: rev,
}
- _, err = s.CompareAndSwap(ctx, *item, newItem)
+ _, err = s.ConditionalUpdate(ctx, newItem)
if err != nil {
if trace.IsCompareFailed(err) {
return trace.CompareFailed("semaphore %v/%v has been concurrently updated, try again", sem.GetSubKind(), sem.GetName())
@@ -801,7 +801,7 @@ func (s *PresenceService) CancelSemaphoreLease(ctx context.Context, lease types.
return trace.Wrap(err)
}
- sem, err := services.UnmarshalSemaphore(item.Value)
+ sem, err := services.UnmarshalSemaphore(item.Value, services.WithRevision(item.Revision))
if err != nil {
return trace.Wrap(err)
}
@@ -823,7 +823,7 @@ func (s *PresenceService) CancelSemaphoreLease(ctx context.Context, lease types.
Revision: rev,
}
- _, err = s.CompareAndSwap(ctx, *item, newItem)
+ _, err = s.ConditionalUpdate(ctx, newItem)
switch {
case err == nil:
return nil
diff --git a/lib/services/local/sessiontracker.go b/lib/services/local/sessiontracker.go
index b8906b02d04ad..ad6fc5e9d06f1 100644
--- a/lib/services/local/sessiontracker.go
+++ b/lib/services/local/sessiontracker.go
@@ -32,11 +32,11 @@ import (
)
const (
- sessionPrefix = "session_tracker"
- retryDelay = time.Second
- terminatedTTL = 3 * time.Minute
- casRetryLimit = 7
- casErrorMessage = "CompareAndSwap reached retry limit"
+ sessionPrefix = "session_tracker"
+ retryDelay = time.Second
+ terminatedTTL = 3 * time.Minute
+ updateRetryLimit = 7
+ updateRetryLimitMessage = "Update retry limit reached"
)
type sessionTracker struct {
@@ -63,7 +63,7 @@ func (s *sessionTracker) loadSession(ctx context.Context, sessionID string) (typ
// UpdatePresence updates the presence status of a user in a session.
func (s *sessionTracker) UpdatePresence(ctx context.Context, sessionID, user string) error {
- for i := 0; i < casRetryLimit; i++ {
+ for i := 0; i < updateRetryLimit; i++ {
sessionItem, err := s.bk.Get(ctx, backend.NewKey(sessionPrefix, sessionID))
if err != nil {
return trace.Wrap(err)
@@ -89,7 +89,7 @@ func (s *sessionTracker) UpdatePresence(ctx context.Context, sessionID, user str
Expires: session.Expiry(),
Revision: sessionItem.Revision,
}
- _, err = s.bk.CompareAndSwap(ctx, *sessionItem, item)
+ _, err = s.bk.ConditionalUpdate(ctx, item)
if trace.IsCompareFailed(err) {
select {
case <-ctx.Done():
@@ -102,7 +102,7 @@ func (s *sessionTracker) UpdatePresence(ctx context.Context, sessionID, user str
return trace.Wrap(err)
}
- return trace.CompareFailed(casErrorMessage)
+ return trace.CompareFailed(updateRetryLimitMessage)
}
// GetSessionTracker returns the current state of a session tracker for an active session.
@@ -202,7 +202,7 @@ func (s *sessionTracker) CreateSessionTracker(ctx context.Context, tracker types
// UpdateSessionTracker updates a tracker resource for an active session.
func (s *sessionTracker) UpdateSessionTracker(ctx context.Context, req *proto.UpdateSessionTrackerRequest) error {
- for i := 0; i < casRetryLimit; i++ {
+ for i := 0; i < updateRetryLimit; i++ {
sessionItem, err := s.bk.Get(ctx, backend.NewKey(sessionPrefix, req.SessionID))
if err != nil {
return trace.Wrap(err)
@@ -268,7 +268,7 @@ func (s *sessionTracker) UpdateSessionTracker(ctx context.Context, req *proto.Up
Expires: expiry,
Revision: sessionItem.Revision,
}
- _, err = s.bk.CompareAndSwap(ctx, *sessionItem, item)
+ _, err = s.bk.ConditionalUpdate(ctx, item)
if trace.IsCompareFailed(err) {
select {
case <-ctx.Done():
@@ -281,7 +281,7 @@ func (s *sessionTracker) UpdateSessionTracker(ctx context.Context, req *proto.Up
return trace.Wrap(err)
}
- return trace.CompareFailed(casErrorMessage)
+ return trace.CompareFailed(updateRetryLimitMessage)
}
// RemoveSessionTracker removes a tracker resource for an active session.
diff --git a/lib/services/local/unstable.go b/lib/services/local/unstable.go
index de3731fff13d7..15643e4e73b47 100644
--- a/lib/services/local/unstable.go
+++ b/lib/services/local/unstable.go
@@ -76,7 +76,8 @@ func (s UnstableService) AssertSystemRole(ctx context.Context, req proto.SystemR
Expires: time.Now().Add(assertionTTL).UTC(),
}
if item != nil {
- _, err = s.CompareAndSwap(ctx, *item, newItem)
+ newItem.Revision = item.Revision
+ _, err = s.ConditionalUpdate(ctx, newItem)
if trace.IsCompareFailed(err) {
// nodes are expected to perform assertions sequentially
return trace.CompareFailed("system role assertion set was concurrently modified (this is bug)")
diff --git a/lib/srv/desktop/discovery_test.go b/lib/srv/desktop/discovery_test.go
index 01941e02d0056..33b257eeefef8 100644
--- a/lib/srv/desktop/discovery_test.go
+++ b/lib/srv/desktop/discovery_test.go
@@ -30,6 +30,7 @@ import (
"github.com/go-ldap/ldap/v3"
"github.com/jonboulle/clockwork"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/gravitational/teleport/api/types"
@@ -268,37 +269,46 @@ func TestDynamicWindowsDiscovery(t *testing.T) {
_, err = dynamicWindowsClient.CreateDynamicWindowsDesktop(ctx, desktop)
require.NoError(t, err)
- time.Sleep(10 * time.Millisecond)
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ desktops, err := client.GetWindowsDesktops(ctx, types.WindowsDesktopFilter{})
+ if !assert.NoError(t, err) {
+ return
+ }
+ if !assert.Len(t, desktops, testCase.expected) {
+ return
+ }
- desktops, err := client.GetWindowsDesktops(ctx, types.WindowsDesktopFilter{})
- require.NoError(t, err)
- require.Len(t, desktops, testCase.expected)
- if testCase.expected > 0 {
- require.Equal(t, desktop.GetName(), desktops[0].GetName())
- require.Equal(t, desktop.GetAddr(), desktops[0].GetAddr())
- }
+ if testCase.expected > 0 {
+ assert.Equal(t, desktop.GetName(), desktops[0].GetName())
+ assert.Equal(t, desktop.GetAddr(), desktops[0].GetAddr())
+ }
+ }, 5*time.Second, 50*time.Millisecond)
desktop.Spec.Addr = "addr2"
_, err = dynamicWindowsClient.UpsertDynamicWindowsDesktop(ctx, desktop)
require.NoError(t, err)
- time.Sleep(10 * time.Millisecond)
- desktops, err = client.GetWindowsDesktops(ctx, types.WindowsDesktopFilter{})
- require.NoError(t, err)
- require.Len(t, desktops, testCase.expected)
- if testCase.expected > 0 {
- require.Equal(t, desktop.GetName(), desktops[0].GetName())
- require.Equal(t, desktop.GetAddr(), desktops[0].GetAddr())
- }
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ desktops, err := client.GetWindowsDesktops(ctx, types.WindowsDesktopFilter{})
+ if !assert.NoError(t, err) {
+ return
+ }
+ if !assert.Len(t, desktops, testCase.expected) {
+ return
+ }
+ if testCase.expected > 0 {
+ assert.Equal(t, desktop.GetName(), desktops[0].GetName())
+ assert.Equal(t, desktop.GetAddr(), desktops[0].GetAddr())
+ }
+ }, 5*time.Second, 50*time.Millisecond)
require.NoError(t, dynamicWindowsClient.DeleteDynamicWindowsDesktop(ctx, "test"))
- time.Sleep(10 * time.Millisecond)
-
- desktops, err = client.GetWindowsDesktops(ctx, types.WindowsDesktopFilter{})
- require.NoError(t, err)
- require.Empty(t, desktops)
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ desktops, err := client.GetWindowsDesktops(ctx, types.WindowsDesktopFilter{})
+ assert.NoError(t, err)
+ assert.Empty(t, desktops)
+ }, 5*time.Second, 50*time.Millisecond)
})
-
}
}
diff --git a/lib/srv/desktop/windows_server.go b/lib/srv/desktop/windows_server.go
index fd75cbc89bd04..7196425b79bff 100644
--- a/lib/srv/desktop/windows_server.go
+++ b/lib/srv/desktop/windows_server.go
@@ -462,7 +462,7 @@ func (s *WindowsService) startLDAPConnectionCheck(ctx context.Context) {
}
s.mu.Unlock()
- // If we have initizlied the LDAP client, then try to use it to make sure we're still connected
+ // If we have initialized the LDAP client, then try to use it to make sure we're still connected
// by attempting to read CAs in the NTAuth store (we know we have permissions to do so).
ntAuthDN := "CN=NTAuthCertificates,CN=Public Key Services,CN=Services,CN=Configuration," + s.cfg.LDAPConfig.DomainDN()
_, err := s.lc.Read(ntAuthDN, "certificationAuthority", []string{"cACertificate"})
diff --git a/tool/common/updater/client_tools.go b/tool/common/updater/client_tools.go
new file mode 100644
index 0000000000000..6949664b35b7d
--- /dev/null
+++ b/tool/common/updater/client_tools.go
@@ -0,0 +1,112 @@
+/*
+ * Teleport
+ * Copyright (C) 2024 Gravitational, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+package updater
+
+import (
+ "context"
+ "errors"
+ "log/slog"
+ "os"
+
+ "github.com/gravitational/trace"
+
+ "github.com/gravitational/teleport/lib/autoupdate/tools"
+ stacksignal "github.com/gravitational/teleport/lib/utils/signal"
+)
+
+// CheckAndUpdateLocal checks whether the TELEPORT_TOOLS_VERSION environment variable
+// requests a specific version (or disables updates when set to "off"). The requested
+// version is compared with the current client tools version. If they differ, the versioned
+// package is downloaded and extracted to the client tools directory, and the tool is
+// re-executed with the updated version.
+// If $TELEPORT_HOME/bin contains downloaded client tools, it always re-executes
+// using the version from the home directory.
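+//
+// A typical call site (sketch, mirroring the updated tool/tctl/common/tctl.go in this change):
+//
+//	if err := updater.CheckAndUpdateLocal(ctx, teleport.Version); err != nil {
+//		utils.FatalError(err)
+//	}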
+func CheckAndUpdateLocal(ctx context.Context, currentVersion string) error {
+ toolsDir, err := tools.Dir()
+ if err != nil {
+ slog.WarnContext(ctx, "Client tools update is disabled", "error", err)
+ return nil
+ }
+ updater := tools.NewUpdater(toolsDir, currentVersion)
+ // At process startup, check if a version has already been downloaded to
+ // $TELEPORT_HOME/bin or if the user has set the TELEPORT_TOOLS_VERSION
+ // environment variable. If so, re-exec that version of client tools.
+ toolsVersion, reExec, err := updater.CheckLocal()
+ if err != nil {
+ return trace.Wrap(err)
+ }
+ if reExec {
+ return trace.Wrap(updateAndReExec(ctx, updater, toolsVersion))
+ }
+
+ return nil
+}
+
+// CheckAndUpdateRemote checks whether a client tools version is configured for update in the
+// cluster configuration by making an HTTP request to the `webapi/find` endpoint. The requested
+// version is compared with the current client tools version. If they differ, the versioned
+// package is downloaded and extracted to the client tools directory, and the tool is
+// re-executed with the updated version.
+// If $TELEPORT_HOME/bin contains downloaded client tools, it always re-executes
+// using the version from the home directory.
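+//
+// A typical call site (sketch, mirroring the updated onLogin in tool/tsh/common/tsh.go in this change):
+//
+//	if err := updater.CheckAndUpdateRemote(cf.Context, teleport.Version, tc.WebProxyAddr, tc.InsecureSkipVerify); err != nil {
+//		return trace.Wrap(err)
+//	}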
+func CheckAndUpdateRemote(ctx context.Context, currentVersion string, proxy string, insecure bool) error {
+ toolsDir, err := tools.Dir()
+ if err != nil {
+ slog.WarnContext(ctx, "Client tools update is disabled", "error", err)
+ return nil
+ }
+ updater := tools.NewUpdater(toolsDir, currentVersion)
+ // The user has typed a command like `tsh ssh ...` without being logged in;
+ // if the running binary needs to be updated, update and re-exec.
+ //
+ // If needed, download the new version of client tools and re-exec. Make
+ // sure to exit this process with the same exit code as the child process.
+ toolsVersion, reExec, err := updater.CheckRemote(ctx, proxy, insecure)
+ if err != nil {
+ return trace.Wrap(err)
+ }
+ if reExec {
+ return trace.Wrap(updateAndReExec(ctx, updater, toolsVersion))
+ }
+
+ return nil
+}
+
+func updateAndReExec(ctx context.Context, updater *tools.Updater, toolsVersion string) error {
+ ctxUpdate, cancel := stacksignal.GetSignalHandler().NotifyContext(ctx)
+ defer cancel()
+ // Download the version of client tools required by the cluster. This
+ // is required if the user passed in the TELEPORT_TOOLS_VERSION
+ // explicitly.
+ err := updater.UpdateWithLock(ctxUpdate, toolsVersion)
+ if err != nil && !errors.Is(err, context.Canceled) {
+ return trace.Wrap(err)
+ }
+
+ // Re-execute client tools with the correct version of client tools.
+ code, err := updater.Exec()
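+ // If the downloaded binary is missing (os.ErrNotExist), fall through and continue
+ // with the currently running version instead of exiting.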
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ slog.DebugContext(ctx, "Failed to re-exec client tool", "error", err)
+ os.Exit(code)
+ } else if err == nil {
+ os.Exit(code)
+ }
+
+ return nil
+}
diff --git a/tool/tctl/common/tctl.go b/tool/tctl/common/tctl.go
index 5af22702f8b17..fa987c6031ea3 100644
--- a/tool/tctl/common/tctl.go
+++ b/tool/tctl/common/tctl.go
@@ -44,7 +44,6 @@ import (
"github.com/gravitational/teleport/lib/auth/authclient"
"github.com/gravitational/teleport/lib/auth/state"
"github.com/gravitational/teleport/lib/auth/storage"
- "github.com/gravitational/teleport/lib/autoupdate/tools"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/client/identityfile"
libmfa "github.com/gravitational/teleport/lib/client/mfa"
@@ -56,8 +55,8 @@ import (
"github.com/gravitational/teleport/lib/service/servicecfg"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/lib/utils/hostid"
- "github.com/gravitational/teleport/lib/utils/signal"
"github.com/gravitational/teleport/tool/common"
+ "github.com/gravitational/teleport/tool/common/updater"
)
const (
@@ -108,42 +107,11 @@ type CLICommand interface {
//
// distribution: name of the Teleport distribution
func Run(ctx context.Context, commands []CLICommand) {
- // The user has typed a command like `tsh ssh ...` without being logged in,
- // if the running binary needs to be updated, update and re-exec.
- //
- // If needed, download the new version of {tsh, tctl} and re-exec. Make
- // sure to exit this process with the same exit code as the child process.
- //
- toolsDir, err := tools.Dir()
- if err != nil {
- utils.FatalError(err)
- }
- updater := tools.NewUpdater(tools.DefaultClientTools(), toolsDir, teleport.Version)
- toolsVersion, reExec, err := updater.CheckLocal()
- if err != nil {
+ if err := updater.CheckAndUpdateLocal(ctx, teleport.Version); err != nil {
utils.FatalError(err)
}
- if reExec {
- ctxUpdate, cancel := signal.GetSignalHandler().NotifyContext(ctx)
- defer cancel()
- // Download the version of client tools required by the cluster. This
- // is required if the user passed in the TELEPORT_TOOLS_VERSION
- // explicitly.
- err := updater.UpdateWithLock(ctxUpdate, toolsVersion)
- if err != nil && !errors.Is(err, context.Canceled) {
- utils.FatalError(err)
- }
- // Re-execute client tools with the correct version of client tools.
- code, err := updater.Exec()
- if err != nil && !errors.Is(err, os.ErrNotExist) {
- log.Debugf("Failed to re-exec client tool: %v.", err)
- os.Exit(code)
- } else if err == nil {
- os.Exit(code)
- }
- }
- err = TryRun(commands, os.Args[1:])
+ err := TryRun(commands, os.Args[1:])
if err != nil {
var exitError *common.ExitCodeError
if errors.As(err, &exitError) {
diff --git a/tool/tsh/common/tsh.go b/tool/tsh/common/tsh.go
index 0ef322caa460a..a3523876116e4 100644
--- a/tool/tsh/common/tsh.go
+++ b/tool/tsh/common/tsh.go
@@ -73,7 +73,6 @@ import (
"github.com/gravitational/teleport/lib/asciitable"
"github.com/gravitational/teleport/lib/auth/authclient"
wancli "github.com/gravitational/teleport/lib/auth/webauthncli"
- "github.com/gravitational/teleport/lib/autoupdate/tools"
"github.com/gravitational/teleport/lib/benchmark"
benchmarkdb "github.com/gravitational/teleport/lib/benchmark/db"
"github.com/gravitational/teleport/lib/client"
@@ -98,6 +97,7 @@ import (
"github.com/gravitational/teleport/tool/common"
"github.com/gravitational/teleport/tool/common/fido2"
"github.com/gravitational/teleport/tool/common/touchid"
+ "github.com/gravitational/teleport/tool/common/updater"
"github.com/gravitational/teleport/tool/common/webauthnwin"
)
@@ -708,37 +708,9 @@ func initLogger(cf *CLIConf) {
//
// DO NOT RUN TESTS that call Run() in parallel (unless you taken precautions).
func Run(ctx context.Context, args []string, opts ...CliOption) error {
- // At process startup, check if a version has already been downloaded to
- // $TELEPORT_HOME/bin or if the user has set the TELEPORT_TOOLS_VERSION
- // environment variable. If so, re-exec that version of {tsh, tctl}.
- toolsDir, err := tools.Dir()
- if err != nil {
- return trace.Wrap(err)
- }
- updater := tools.NewUpdater(tools.DefaultClientTools(), toolsDir, teleport.Version)
- toolsVersion, reExec, err := updater.CheckLocal()
- if err != nil {
+ if err := updater.CheckAndUpdateLocal(ctx, teleport.Version); err != nil {
return trace.Wrap(err)
}
- if reExec {
- ctxUpdate, cancel := stacksignal.GetSignalHandler().NotifyContext(ctx)
- defer cancel()
- // Download the version of client tools required by the cluster. This
- // is required if the user passed in the TELEPORT_TOOLS_VERSION
- // explicitly.
- err := updater.UpdateWithLock(ctxUpdate, toolsVersion)
- if err != nil && !errors.Is(err, context.Canceled) {
- return trace.Wrap(err)
- }
- // Re-execute client tools with the correct version of client tools.
- code, err := updater.Exec()
- if err != nil && !errors.Is(err, os.ErrNotExist) {
- log.Debugf("Failed to re-exec client tool: %v.", err)
- os.Exit(code)
- } else if err == nil {
- os.Exit(code)
- }
- }
cf := CLIConf{
Context: ctx,
@@ -1273,6 +1245,7 @@ func Run(ctx context.Context, args []string, opts ...CliOption) error {
bench.Hidden()
}
+ var err error
cf.executablePath, err = os.Executable()
if err != nil {
return trace.Wrap(err)
@@ -1901,7 +1874,7 @@ func onLogin(cf *CLIConf) error {
// The user is not logged in and has typed in `tsh --proxy=... login`, if
// the running binary needs to be updated, update and re-exec.
if profile == nil {
- if err := updateAndRun(cf.Context, tc.WebProxyAddr, tc.InsecureSkipVerify); err != nil {
+ if err := updater.CheckAndUpdateRemote(cf.Context, teleport.Version, tc.WebProxyAddr, tc.InsecureSkipVerify); err != nil {
return trace.Wrap(err)
}
}
@@ -1919,7 +1892,7 @@ func onLogin(cf *CLIConf) error {
// The user has typed `tsh login`, if the running binary needs to
// be updated, update and re-exec.
- if err := updateAndRun(cf.Context, tc.WebProxyAddr, tc.InsecureSkipVerify); err != nil {
+ if err := updater.CheckAndUpdateRemote(cf.Context, teleport.Version, tc.WebProxyAddr, tc.InsecureSkipVerify); err != nil {
return trace.Wrap(err)
}
@@ -1939,7 +1912,7 @@ func onLogin(cf *CLIConf) error {
// The user has typed `tsh login`, if the running binary needs to
// be updated, update and re-exec.
- if err := updateAndRun(cf.Context, tc.WebProxyAddr, tc.InsecureSkipVerify); err != nil {
+ if err := updater.CheckAndUpdateRemote(cf.Context, teleport.Version, tc.WebProxyAddr, tc.InsecureSkipVerify); err != nil {
return trace.Wrap(err)
}
@@ -2015,7 +1988,7 @@ func onLogin(cf *CLIConf) error {
default:
// The user is logged in and has typed in `tsh --proxy=... login`, if
// the running binary needs to be updated, update and re-exec.
- if err := updateAndRun(cf.Context, tc.WebProxyAddr, tc.InsecureSkipVerify); err != nil {
+ if err := updater.CheckAndUpdateRemote(cf.Context, teleport.Version, tc.WebProxyAddr, tc.InsecureSkipVerify); err != nil {
return trace.Wrap(err)
}
}
@@ -5625,43 +5598,6 @@ const (
"https://goteleport.com/docs/access-controls/guides/headless/#troubleshooting"
)
-func updateAndRun(ctx context.Context, proxy string, insecure bool) error {
- // The user has typed a command like `tsh ssh ...` without being logged in,
- // if the running binary needs to be updated, update and re-exec.
- //
- // If needed, download the new version of {tsh, tctl} and re-exec. Make
- // sure to exit this process with the same exit code as the child process.
- //
- toolsDir, err := tools.Dir()
- if err != nil {
- return trace.Wrap(err)
- }
- updater := tools.NewUpdater(tools.DefaultClientTools(), toolsDir, teleport.Version)
- toolsVersion, reExec, err := updater.CheckRemote(ctx, proxy, insecure)
- if err != nil {
- return trace.Wrap(err)
- }
- if reExec {
- ctxUpdate, cancel := stacksignal.GetSignalHandler().NotifyContext(context.Background())
- defer cancel()
- // Download the version of client tools required by the cluster.
- err := updater.UpdateWithLock(ctxUpdate, toolsVersion)
- if err != nil && !errors.Is(err, context.Canceled) {
- return trace.Wrap(err)
- }
- // Re-execute client tools with the correct version of client tools.
- code, err := updater.Exec()
- if err != nil && !errors.Is(err, os.ErrNotExist) {
- log.Debugf("Failed to re-exec client tool: %v.", err)
- os.Exit(code)
- } else if err == nil {
- os.Exit(code)
- }
- }
-
- return nil
-}
-
// Lock the process memory to prevent rsa keys and certificates in memory from being exposed in a swap.
func tryLockMemory(cf *CLIConf) error {
if cf.MlockMode == mlockModeAuto {
diff --git a/web/packages/teleterm/src/mainProcess/mainProcess.ts b/web/packages/teleterm/src/mainProcess/mainProcess.ts
index 665fa2cccc357..2987f4e5c7dc9 100644
--- a/web/packages/teleterm/src/mainProcess/mainProcess.ts
+++ b/web/packages/teleterm/src/mainProcess/mainProcess.ts
@@ -53,6 +53,10 @@ import * as grpcCreds from 'teleterm/services/grpcCredentials';
import { createTshdClient, TshdClient } from 'teleterm/services/tshd';
import { loggingInterceptor } from 'teleterm/services/tshd/interceptors';
import { staticConfig } from 'teleterm/staticConfig';
+import {
+ TSH_AUTOUPDATE_ENV_VAR,
+ TSH_AUTOUPDATE_OFF,
+} from 'teleterm/node/tshAutoupdate';
import {
ConfigService,
@@ -188,6 +192,7 @@ export default class MainProcess {
env: {
...process.env,
TELEPORT_HOME: homeDir,
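+ // Ensure the bundled tsh never tries to download and re-exec a different version of itself.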
+ [TSH_AUTOUPDATE_ENV_VAR]: TSH_AUTOUPDATE_OFF,
},
}
);
diff --git a/web/packages/teleterm/src/node/README.md b/web/packages/teleterm/src/node/README.md
new file mode 100644
index 0000000000000..28ea74a4d8d5c
--- /dev/null
+++ b/web/packages/teleterm/src/node/README.md
@@ -0,0 +1,2 @@
+Files in this directory are executed within a Node.js process, be it the main process or the shared
+process.
diff --git a/web/packages/teleterm/src/node/tshAutoupdate.ts b/web/packages/teleterm/src/node/tshAutoupdate.ts
new file mode 100644
index 0000000000000..8ac6d73d9b8b2
--- /dev/null
+++ b/web/packages/teleterm/src/node/tshAutoupdate.ts
@@ -0,0 +1,27 @@
+/**
+ * Teleport
+ * Copyright (C) 2024 Gravitational, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+/**
+ * An env var which controls whether tsh downloads an up-to-date version of itself
+ * to ~/.tsh/bin and re-executes itself. In Connect, we always want it set to 'off', as Connect
+ * needs to use the bundled tsh, whose version exactly matches the version of Connect.
+ *
+ * See RFD 144 for more details.
+ */
+export const TSH_AUTOUPDATE_ENV_VAR = 'TELEPORT_TOOLS_VERSION';
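+
+/** The value which, when assigned to TSH_AUTOUPDATE_ENV_VAR, disables tsh autoupdate. */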
+export const TSH_AUTOUPDATE_OFF = 'off';
diff --git a/web/packages/teleterm/src/services/pty/ptyHost/buildPtyOptions.test.ts b/web/packages/teleterm/src/services/pty/ptyHost/buildPtyOptions.test.ts
index 1b910e59c94b0..4c3ccc926f40e 100644
--- a/web/packages/teleterm/src/services/pty/ptyHost/buildPtyOptions.test.ts
+++ b/web/packages/teleterm/src/services/pty/ptyHost/buildPtyOptions.test.ts
@@ -331,7 +331,7 @@ describe('buildPtyOptions', () => {
});
expect(processOptions.env.WSLENV).toBe(
- 'CUSTOM_VAR:TERM_PROGRAM:TERM_PROGRAM_VERSION:TELEPORT_CLUSTER:TELEPORT_PROXY:TELEPORT_HOME/p:KUBECONFIG/p'
+ 'CUSTOM_VAR:KUBECONFIG/p:TERM_PROGRAM:TERM_PROGRAM_VERSION:TELEPORT_CLUSTER:TELEPORT_PROXY:TELEPORT_HOME/p:TELEPORT_TOOLS_VERSION'
);
});
});
diff --git a/web/packages/teleterm/src/services/pty/ptyHost/buildPtyOptions.ts b/web/packages/teleterm/src/services/pty/ptyHost/buildPtyOptions.ts
index 0019f255ee44a..d73f888ebefb0 100644
--- a/web/packages/teleterm/src/services/pty/ptyHost/buildPtyOptions.ts
+++ b/web/packages/teleterm/src/services/pty/ptyHost/buildPtyOptions.ts
@@ -21,9 +21,12 @@ import path, { delimiter } from 'path';
import { RuntimeSettings } from 'teleterm/mainProcess/types';
import { PtyProcessOptions } from 'teleterm/sharedProcess/ptyHost';
import { assertUnreachable } from 'teleterm/ui/utils';
-
import { Shell, makeCustomShellFromPath } from 'teleterm/mainProcess/shell';
import { CUSTOM_SHELL_ID } from 'teleterm/services/config/appConfigSchema';
+import {
+ TSH_AUTOUPDATE_ENV_VAR,
+ TSH_AUTOUPDATE_OFF,
+} from 'teleterm/node/tshAutoupdate';
import {
PtyCommand,
@@ -92,6 +95,9 @@ export async function buildPtyOptions({
throw error;
})
.then(({ shellEnv, creationStatus }) => {
+ // combinedEnv is used as the env of every command coming out of buildPtyOptions. Some
+ // commands might add extra variables, but they shouldn't remove any of the env vars that are
+ // added here.
const combinedEnv = {
...processEnv,
...shellEnv,
@@ -100,6 +106,7 @@ export async function buildPtyOptions({
TELEPORT_HOME: settings.tshd.homeDir,
TELEPORT_CLUSTER: cmd.clusterName,
TELEPORT_PROXY: cmd.proxyHost,
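+ // Disable tsh autoupdate in terminal sessions so commands run the bundled tsh version.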
+ [TSH_AUTOUPDATE_ENV_VAR]: TSH_AUTOUPDATE_OFF,
};
// The regular env vars are not available in WSL,
@@ -108,12 +115,13 @@ export async function buildPtyOptions({
// https://devblogs.microsoft.com/commandline/share-environment-vars-between-wsl-and-windows/
if (settings.platform === 'win32' && shell.binName === 'wsl.exe') {
const wslEnv = [
+ 'KUBECONFIG/p',
'TERM_PROGRAM',
'TERM_PROGRAM_VERSION',
'TELEPORT_CLUSTER',
'TELEPORT_PROXY',
'TELEPORT_HOME/p',
- 'KUBECONFIG/p',
+ TSH_AUTOUPDATE_ENV_VAR,
];
// Preserve the user defined WSLENV and add ours (ours takes precedence).
combinedEnv[WSLENV_VAR] = [combinedEnv[WSLENV_VAR], wslEnv]