diff --git a/.devcontainer/.env b/.devcontainer/.env
new file mode 100644
index 000000000..7398e4add
--- /dev/null
+++ b/.devcontainer/.env
@@ -0,0 +1,12 @@
+# DOCS_NEXT
+REACT_APP_VERSION=0.1-development
+REACT_APP_DOCS_NEXT_HOST=docs-next.example.de
+REACT_APP_DOCS_NEXT_ORG=akyriako
+
+# DOCUSAURUS
+REACT_APP_DOCUSAURUS_BASE_URL=/
+
+# UMAMI
+UMAMI_WEBSITE_ID=00000000-0000-0000-0000-000000000000
+UMAMI_ANALYTICS_DOMAIN=localhost
+
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index e50f72c61..dbdc906d8 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -21,7 +21,8 @@
"yzhang.markdown-all-in-one",
"DavidAnson.vscode-markdownlint",
"redhat.vscode-yaml",
- "TakumiI.markdowntable"
+ "TakumiI.markdowntable",
+ "Perkovec.emoji"
]
}
},
@@ -37,7 +38,11 @@
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
- "postCreateCommand": "npm install"
+ "postCreateCommand": "npm install",
+
+ "runArgs": [
+ "--env-file", "${localWorkspaceFolder}/.devcontainer/.env"
+ ]
// Configure tool-specific properties.
// "customizations": {},
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 000000000..cf1ef94c1
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,2 @@
+# Admins must approve changes to docs-next repository
+/docs/ @opentelekomcloud/ac-content-reviewer
diff --git a/.github/workflows/automerge.yaml b/.github/workflows/automerge.yaml
new file mode 100644
index 000000000..3146e77b5
--- /dev/null
+++ b/.github/workflows/automerge.yaml
@@ -0,0 +1,42 @@
+name: automerge
+
+on:
+ pull_request:
+ types:
+ - labeled
+ - unlabeled
+ - synchronize
+ - opened
+ - edited
+ - ready_for_review
+ - reopened
+ - unlocked
+ pull_request_review:
+ types:
+ - submitted
+ check_suite:
+ types:
+ - completed
+ status: {}
+
+jobs:
+ automerge:
+ runs-on: ubuntu-latest
+ if: >
+ contains(github.event.pull_request.labels.*.name, 'gate')
+ environment: github
+ steps:
+ - name: Create GitHub App Token
+ id: app-token
+ uses: actions/create-github-app-token@v1
+ with:
+ app-id: ${{ secrets.APP_ID }}
+ private-key: ${{ secrets.APP_KEY }}
+
+ - id: automerge
+ name: automerge
+ uses: pascalgn/automerge-action@v0.16.3
+ env:
+ GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}
+ MERGE_LABELS: "gate"
+ MERGE_METHOD: "squash"
diff --git a/.github/workflows/build-publish-ephemeral.yaml b/.github/workflows/build-publish-ephemeral.yaml
new file mode 100644
index 000000000..3bd31fdb2
--- /dev/null
+++ b/.github/workflows/build-publish-ephemeral.yaml
@@ -0,0 +1,47 @@
+name: Manage Pull Request Preview Instances
+
+on:
+ pull_request:
+ types:
+ - opened
+ - reopened
+ - synchronize
+ - closed
+
+concurrency: preview-${{ github.ref }}
+
+jobs:
+ deploy-ephemeral-preview:
+ name: Deploy or Remove Ephemeral Preview
+ environment:
+ name: pull-requests-preview
+ url: 'https://${{ github.repository_owner }}.github.io${{ vars.DOCUSAURUS_BASE_URL }}pr-${{ github.event.pull_request.number }}'
+ runs-on: ubuntu-20.04
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Get Commit Hash
+ id: commit_hash
+ uses: prompt/actions-commit-hash@v3
+
+ - name: Install and Build
+ if: github.event.action != 'closed'
+ env:
+ REACT_APP_VERSION: ${{ vars.APP_VERSION }}.PR${{ github.event.pull_request.number }}-${{ github.run_number }}-${{ steps.commit_hash.outputs.short }}-ephemeral
+ REACT_APP_DOCS_NEXT_HOST: ${{ vars.DOCS_NEXT_HOST }}
+ REACT_APP_DOCS_NEXT_ORG: ${{ vars.DOCS_NEXT_ORG }}
+ REACT_APP_DOCUSAURUS_BASE_URL: ${{ vars.DOCUSAURUS_BASE_URL }}pr-${{ github.event.pull_request.number }}
+ UMAMI_WEBSITE_ID: ${{ vars.UMAMI_WEBSITE_ID }}
+ UMAMI_ANALYTICS_DOMAIN: ${{ vars.UMAMI_ANALYTICS_DOMAIN }}
+ UMAMI_DATAHOST_URL: ${{ vars.UMAMI_DATAHOST_URL }}
+ UMAMI_DATA_DOMAIN: ${{ vars.UMAMI_DATA_DOMAINS }}
+ run: |
+ npm install
+ npm run build
+
+ - name: Deploy preview
+ uses: rossjrw/pr-preview-action@v1
+ with:
+ source-dir: ./build/
\ No newline at end of file
diff --git a/.github/workflows/build-publish-production.yaml b/.github/workflows/build-publish-production.yaml
new file mode 100644
index 000000000..cbf818cea
--- /dev/null
+++ b/.github/workflows/build-publish-production.yaml
@@ -0,0 +1,135 @@
+name: Build and Deploy Production Docker Images
+
+on:
+ push:
+ tags:
+ - "v*.*.*"
+ workflow_dispatch:
+
+jobs:
+ build-stable:
+ name: Build Production Artifacts
+ environment:
+ name: stable
+ runs-on: ubuntu-latest
+ outputs:
+ image_version: ${{ env.IMAGE_SEMVER }}
+ commit_hash: ${{ steps.export_commit_hash.outputs.commit_hash }}
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Setup NodeJS
+ uses: actions/setup-node@v4
+ with:
+ node-version: 'lts/*'
+
+ - name: Install NodeJS Dependencies
+ run: npm install
+
+ - name: Get Commit Hash
+ id: commit_hash
+ uses: prompt/actions-commit-hash@v3
+
+ - name: Build Version Tag
+ id: build_version_tag
+ run: echo "IMAGE_SEMVER=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
+
+ - name: Export Commit Hash
+ id: export_commit_hash
+ run: echo "commit_hash=${{ steps.commit_hash.outputs.short }}" >> $GITHUB_OUTPUT
+
+ - name: Build Container Image Metadata
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ # list of Docker images to use as base name for tags
+ images: |
+ ${{ vars.REGISTRY }}/${{ vars.REGISTRY_ORG }}/${{ vars.IMG_NAME }}
+ # generate Docker tags based on the following events/attributes
+ tags: |
+ type=semver,pattern=v{{major}}.{{minor}}.{{patch}}
+ type=raw,value=latest
+
+
+ - name: Build App
+ env:
+ REACT_APP_VERSION: ${{ env.IMAGE_SEMVER }}
+ REACT_APP_DOCS_NEXT_HOST: ${{ vars.DOCS_NEXT_HOST }}
+ REACT_APP_DOCS_NEXT_ORG: ${{ vars.DOCS_NEXT_ORG }}
+ REACT_APP_DOCUSAURUS_BASE_URL: ${{ vars.DOCUSAURUS_BASE_URL }}
+ REACT_APP_TYPESENSE_PROTOCOL: ${{ vars.TYPESENSE_PROTOCOL }}
+ REACT_APP_TYPESENSE_HOST: ${{ vars.TYPESENSE_HOST }}
+ REACT_APP_TYPESENSE_PORT: ${{ vars.TYPESENSE_PORT }}
+ REACT_APP_TYPESENSE_API_KEY: ${{ secrets.TYPESENSE_SEARCH_KEY }}
+ UMAMI_WEBSITE_ID: ${{ vars.UMAMI_WEBSITE_ID }}
+ UMAMI_ANALYTICS_DOMAIN: ${{ vars.UMAMI_ANALYTICS_DOMAIN }}
+ UMAMI_DATAHOST_URL: ${{ vars.UMAMI_DATAHOST_URL }}
+ UMAMI_DATA_DOMAIN: ${{ vars.UMAMI_DATA_DOMAINS }}
+ run: npm run build
+
+ - name: Login to Container Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ${{ vars.REGISTRY }}
+ username: ${{ secrets.REGISTRY_USER }}
+ password: ${{ secrets.REGISTRY_PASSWORD }}
+
+ - name: Setup Docker Buildx
+ id: buildx
+ uses: docker/setup-buildx-action@v3.7.1
+
+ - name: Build and Push (Docker Image)
+ id: docker_build
+ uses: docker/build-push-action@v6.9.0
+ with:
+ context: ./
+ file: ./Dockerfile
+ provenance: false
+ push: ${{ github.event_name != 'pull_request' }}
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ annotations: ${{ steps.meta.outputs.annotations }}
+
+ update-helm-charts:
+ needs: [build-stable]
+ environment: stable
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ pull-requests: write
+
+ steps:
+ # - name: Create GitHub App Token
+ # id: app-token
+ # uses: actions/create-github-app-token@v1
+ # with:
+ # app-id: ${{ secrets.APP_ID }}
+ # private-key: ${{ secrets.APP_KEY }}
+ - name: Checkout Charts Repo
+ uses: actions/checkout@v4
+ with:
+ repository: "${{ vars.DOCS_NEXT_CHARTS_ORG }}/${{ vars.DOCS_NEXT_CHARTS_REPO }}"
+ token: ${{ secrets.DOCS_NEXT_TOKEN }}
+ - name: Commit Changes
+ env:
+ image: ${{ vars.REGISTRY }}/${{ vars.REGISTRY_ORG }}/${{ vars.IMG_NAME }}
+ tag: ${{ needs.build-stable.outputs.image_version }}
+ run: |
+ git config --global user.name 'otcbot'
+ git config --global user.email 'otc_ecosystem_squad@t-systems.com'
+ sed -i 's|^version: .*|version: 0.3.${{github.run_number}}|' ./charts/docusaurus/Chart.yaml
+ sed -i 's|^appVersion: .*|appVersion: ${{ env.tag }}|' ./charts/docusaurus/Chart.yaml
+ sed -i 's|^tag: .*|tag: ${{ env.tag }}|' ./charts/docusaurus/values-prod.yaml
+ sed -i 's|^image: .*|image: ${{ env.image }}|' ./charts/docusaurus/values-prod.yaml
+ git commit -am "Automatic commit from GitHub Actions triggered by action ${{github.run_number}}"
+ - name: Create Pull Request
+ uses: peter-evans/create-pull-request@v7
+ env:
+ remote_pr_branch: 'release/production-${{ needs.build-stable.outputs.image_version }}'
+ with:
+ title: ${{ env.remote_pr_branch }}
+ token: ${{ secrets.DOCS_NEXT_TOKEN }}
+ branch: ${{ env.remote_pr_branch }}
+
\ No newline at end of file
diff --git a/.github/workflows/build-publish-staging.yaml b/.github/workflows/build-publish-staging.yaml
new file mode 100644
index 000000000..6402a4781
--- /dev/null
+++ b/.github/workflows/build-publish-staging.yaml
@@ -0,0 +1,133 @@
+name: Build and Deploy Staging Docker Images
+
+on:
+ push:
+ branches:
+ - main
+ paths-ignore:
+ - '**/README.md'
+ - '**/CONTRIBUTING.md'
+ - '**/CONFIGURATION.md'
+ - '**/.devcontainer/**'
+ - "**/.github/workflows/**"
+ workflow_dispatch:
+
+jobs:
+ build-preview:
+ name: Build Staging Artifacts
+ environment:
+ name: preview
+ runs-on: ubuntu-latest
+ outputs:
+ image_version: ${{ steps.build_image_tag.outputs.image_version }}
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Setup NodeJS
+ uses: actions/setup-node@v4
+ with:
+ node-version: 'lts/*'
+
+ - name: Install NodeJS Dependencies
+ run: npm install
+
+ - name: Get Commit Hash
+ id: commit_hash
+ uses: prompt/actions-commit-hash@v3
+
+ - name: Get Current Date
+ id: date
+ run: echo "today=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT
+
+ - name: Build Image Tag
+ id: build_image_tag
+ run: echo "image_version=${{ steps.date.outputs.today }}.${{github.run_number}}.0-${{ steps.commit_hash.outputs.short }}" >> $GITHUB_OUTPUT
+
+ - name: Build Container Image Metadata
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: |
+ ${{ vars.REGISTRY }}/${{ vars.REGISTRY_ORG }}/${{ vars.IMG_NAME }}
+ tags: |
+ type=raw,value=${{ steps.build_image_tag.outputs.image_version }}
+
+ - name: Build App
+ env:
+ REACT_APP_VERSION: ${{ steps.build_image_tag.outputs.image_version }}
+ REACT_APP_DOCS_NEXT_HOST: ${{ vars.DOCS_NEXT_HOST }}
+ REACT_APP_DOCS_NEXT_ORG: ${{ vars.DOCS_NEXT_ORG }}
+ REACT_APP_DOCUSAURUS_BASE_URL: ${{ vars.DOCUSAURUS_BASE_URL }}
+ REACT_APP_TYPESENSE_PROTOCOL: ${{ vars.TYPESENSE_PROTOCOL }}
+ REACT_APP_TYPESENSE_HOST: ${{ vars.TYPESENSE_HOST }}
+ REACT_APP_TYPESENSE_PORT: ${{ vars.TYPESENSE_PORT }}
+ REACT_APP_TYPESENSE_API_KEY: ${{ secrets.TYPESENSE_SEARCH_KEY }}
+ UMAMI_WEBSITE_ID: ${{ vars.UMAMI_WEBSITE_ID }}
+ UMAMI_ANALYTICS_DOMAIN: ${{ vars.UMAMI_ANALYTICS_DOMAIN }}
+ UMAMI_DATAHOST_URL: ${{ vars.UMAMI_DATAHOST_URL }}
+ UMAMI_DATA_DOMAIN: ${{ vars.UMAMI_DATA_DOMAINS }}
+ run: npm run build
+
+ - name: Login to Container Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ${{ vars.REGISTRY }}
+ username: ${{ secrets.REGISTRY_USER }}
+ password: ${{ secrets.REGISTRY_PASSWORD }}
+
+ - name: Setup Docker Buildx
+ id: buildx
+ uses: docker/setup-buildx-action@v3.7.1
+
+ - name: Build and Push (Docker Image)
+ id: docker_build
+ uses: docker/build-push-action@v6.9.0
+ with:
+ context: ./
+ file: ./Dockerfile
+ provenance: false
+ push: ${{ github.event_name != 'pull_request' }}
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ annotations: ${{ steps.meta.outputs.annotations }}
+
+ update-helm-charts:
+ needs: [build-preview]
+ environment: preview
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ pull-requests: write
+
+ steps:
+ # - name: Create GitHub App Token
+ # id: app-token
+ # uses: actions/create-github-app-token@v1
+ # with:
+ # app-id: ${{ secrets.APP_ID }}
+ # private-key: ${{ secrets.APP_KEY }}
+ - name: Checkout Charts Repo
+ uses: actions/checkout@v4
+ with:
+ repository: "${{ vars.DOCS_NEXT_CHARTS_ORG }}/${{ vars.DOCS_NEXT_CHARTS_REPO }}"
+ token: ${{ secrets.DOCS_NEXT_TOKEN }}
+ - name: Commit Changes
+ env:
+ image: ${{ vars.REGISTRY }}/${{ vars.REGISTRY_ORG }}/${{ vars.IMG_NAME }}
+ tag: ${{ needs.build-preview.outputs.image_version }}
+ run: |
+ git config --global user.name 'otcbot'
+ git config --global user.email 'otc_ecosystem_squad@t-systems.com'
+ sed -i 's|^tag: .*|tag: ${{ env.tag }}|' ./charts/docusaurus/values-stg.yaml
+ sed -i 's|^image: .*|image: ${{ env.image }}|' ./charts/docusaurus/values-stg.yaml
+ git commit -am "Automatic commit from GitHub Actions triggered by action ${{github.run_number}}"
+ - name: Create Pull Request
+ uses: peter-evans/create-pull-request@v7
+ env:
+ remote_pr_branch: 'release/staging-${{ needs.build-preview.outputs.image_version }}'
+ with:
+ title: ${{ env.remote_pr_branch }}
+ token: ${{ secrets.DOCS_NEXT_TOKEN }}
+ branch: ${{ env.remote_pr_branch }}
diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml
new file mode 100644
index 000000000..cc096e543
--- /dev/null
+++ b/.github/workflows/check.yaml
@@ -0,0 +1,23 @@
+name: check
+
+on:
+ pull_request:
+ branches: [ '*' ]
+
+jobs:
+ check:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Use Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: 'lts/*'
+ cache: 'yarn'
+
+ - name: Install dependencies
+ run: yarn install
+
+      - name: Run lint
+ run: yarn lint
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
deleted file mode 100644
index 25a9f3c0e..000000000
--- a/.github/workflows/release.yaml
+++ /dev/null
@@ -1,114 +0,0 @@
-on:
- push:
- branches:
- - main
- paths-ignore:
- - '**/README.md'
- - '**/CONTRIBUTING.md'
- - '**/.devcontainer/devcontainer.json'
-
-jobs:
- build:
- environment: preview
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout
- uses: actions/checkout@v2
-
- - name: Setup NodeJS
- uses: actions/setup-node@v2
- with:
- node-version: 18 # Use Node.js 18 here
-
- - name: Install Dependencies
- run: npm install
-
- - name: Build App
- env:
- REACT_APP_TYPESENSE_PROTOCOL: ${{ vars.TYPESENSE_PROTOCOL }}
- REACT_APP_TYPESENSE_HOST: ${{ vars.TYPESENSE_HOST }}
- REACT_APP_TYPESENSE_PORT: ${{ vars.TYPESENSE_PORT }}
- REACT_APP_TYPESENSE_API_KEY: ${{ secrets.TYPESENSE_SEARCH_KEY }}
- run: npm run build
-
- - name: Login to Docker Hub
- uses: docker/login-action@v3
- with:
- username: ${{ secrets.DOCKERHUB_USERNAME }}
- password: ${{ secrets.DOCKERHUB_TOKEN }}
-
- - name: Setup Docker Buildx
- id: buildx
- uses: docker/setup-buildx-action@v1
-
- - name: Get Commit Hash
- id: commit_hash
- uses: prompt/actions-commit-hash@v3
- - uses: docker/setup-buildx-action@v3
- - name: Build and Push (Docker Image)
- id: docker_build
- uses: docker/build-push-action@v2
- with:
- context: ./
- file: ./Dockerfile
- push: true
- tags: |
- ${{ secrets.DOCKERHUB_USERNAME }}/${{ vars.IMG_NAME }}:latest
- ${{ secrets.DOCKERHUB_USERNAME }}/${{ vars.IMG_NAME }}:${{ vars.APP_VERSION }}.${{github.run_number}}-${{ steps.commit_hash.outputs.short }}
-
- # - name: Image Digest
- # run: echo ${{ steps.docker_build.outputs.digest }}
-
- update-helm-charts:
- needs: [build]
- environment: preview
- runs-on: ubuntu-latest
-
- steps:
- - name: Get Commit Hash
- id: commit_hash
- uses: prompt/actions-commit-hash@v3
-
- - name: Show Commit Hash Digest
- run: echo ${{ steps.commit_hash.outputs.short }}
-
- - name: Configure Git User as GitHub Actions Bot
- run: |
- git config --global user.name 'github-actions[bot]'
- git config --global user.email 'github-actions[bot]@users.noreply.github.com'
-
- - name: Checkout
- uses: actions/checkout@v4
- with:
- repository: "${{ vars.DOCS_NEXT_CHARTS_ORG }}/${{ vars.DOCS_NEXT_CHARTS_REPO }}"
- token: ${{ secrets.DOCS_NEXT_CHARTS_TOKEN }}
-
- - name: Show Contents
- run: ls -latr
-
- - name: Update Charts and Commit Changes
- id: update_charts
- env:
- image: ${{ secrets.DOCKERHUB_USERNAME }}\/${{ vars.IMG_NAME }}
- run: |
- # docusaurus
- sed -i 's/^version: .*/version: ${{ vars.APP_VERSION }}.${{github.run_number}}/' ./charts/docusaurus/Chart.yaml
- sed -i 's/^appVersion: .*/appVersion: ${{ vars.APP_VERSION }}.${{github.run_number}}-${{ steps.commit_hash.outputs.short }}/' ./charts/docusaurus/Chart.yaml
- sed -i 's/^tag: .*/tag: ${{ vars.APP_VERSION }}.${{github.run_number}}-${{ steps.commit_hash.outputs.short }}/' ./charts/docusaurus/values.yaml
- sed -i 's/^image: .*/image: ${{ env.image }}/' ./charts/docusaurus/values.yaml
- cat ./charts/docusaurus/Chart.yaml
- echo ""
- echo "---"
- echo ""
- cat ./charts/docusaurus/values.yaml
- # commit and push
- git commit -am "Automatic commit from GitHub Actions triggered by action #${{github.run_number}}"
- git remote set-url origin https://${{ secrets.DOCS_NEXT_CHARTS_TOKEN }}@github.com/${{ vars.DOCS_NEXT_CHARTS_ORG }}/${{ vars.DOCS_NEXT_CHARTS_REPO }}.git
- git push origin main
-
-
-
-
-
-
\ No newline at end of file
diff --git a/CONFIGURATION.md b/CONFIGURATION.md
new file mode 100644
index 000000000..9c4cd5436
--- /dev/null
+++ b/CONFIGURATION.md
@@ -0,0 +1,82 @@
+# GitHub Environments Configuration
+
+You need to configure 4 GitHub environments:
+
+- **preview**: for staging
+- **stable**: for production
+- **pull-requests-preview**: for ephemeral deployments for PR reviews
+- **gh-pages**: for GitHub pages publishing and deployment
+
+## GitHub Pages
+
+Create a new branch, `gh-pages`, and go to *Settings* -> *Pages* of the repository:
+
+![GitHub Pages configuration screen](static/img/configure_gh_pages.png)
+
+Choose **Deploy from a branch** as *Source*, and as *Branch* **gh-pages/root**.
+
+> [!IMPORTANT]
+> Do this for **both** repositories, **docs-next** and **docs-next-chart**!
+
+## Helm Charts Repository Token Configuration (docs-next-charts)
+
+Go to *Account* -> *Settings* -> *Developer Settings* -> *Personal Access Tokens* -> *Fine-grained tokens* and click *Generate new token*. Create a new token with the name **docs-next-charts-token** and give it access to the repository: **docs-next-charts**. Assign the following permissions to the token:
+
+- **Read access to metadata**
+- **Read and Write access to actions, code, commit statuses, and workflows**
+
+![Fine-grained token permissions for cross-repository commits](static/img/cross_repo_commit_token.png)
+
+> [!IMPORTANT]
+> Save the value of the token; you will set it afterwards as the value of the secret `DOCS_NEXT_CHARTS_TOKEN`.
+
+## Code Repository Configuration (docs-next)
+
+### Workflow Permissions
+
+Go to *Settings* -> *Actions* -> *General* of the repository and choose **Read and write permissions** as *Workflow Permissions*. Click *Save* to persist changes:
+
+![Workflow read and write permissions setting](static/img/workflow_permissions.png)
+
+### Variables
+
+| Variable | pull-requests-preview | preview | stable | Default/Description |
+| :--------------------- | :----------------------------: | :----------------: | :----------------: | :------------------------- |
+| APP_VERSION | 1️⃣ | 1️⃣ | 1️⃣ | `0.1` |
+| IMG_NAME | ❌ | `docs-next` | `docs-next` | Docker Image Name |
+| DOCS_NEXT_ORG | 1️⃣ | 1️⃣ | 1️⃣ | GitHub Org Name |
+| DOCS_NEXT_REPO | ❌ | `docs-next` | `docs-next` | GitHub Repo Name |
+| DOCS_NEXT_HOST | `$DOCS_NEXT_ORG`.github.io | ✅ | ✅ | Domain name |
+| DOCS_NEXT_CHARTS_ORG | ❌ | 1️⃣ | 1️⃣ | GitHub Org Name |
+| DOCS_NEXT_CHARTS_REPO | ❌ | `docs-next-charts` | `docs-next-charts` | GitHub Repo Name |
+| DOCUSAURUS_BASE_URL | `/docs-next/pr-preview/` | ❌ | ❌ | Docusaurus `baseUrl` |
+| TYPESENSE_HOST | ❌ | ✅ | ✅ | Domain name |
+| TYPESENSE_PROTOCOL | ❌ | ✅ | ✅ | `https` |
+| TYPESENSE_PORT | ❌ | ✅ | ✅ | `443` |
+| UMAMI_ANALYTICS_DOMAIN | `analytics.example.de` | ✅ | ✅ | Domain name |
+| UMAMI_DATAHOST_URL | `https://analytics.example.de` | ✅ | ✅ | Umami URL |
+| UMAMI_DATA_DOMAINS | ✅ | `$DOCS_NEXT_HOST` | `$DOCS_NEXT_HOST` | Umami Allowed CORS Domains |
+| UMAMI_WEBSITE_ID | `00000` | ✅ | ✅ | Umami WebSite ID |
+
+> [!NOTE]
+> ✅ : Yes,
+> 1️⃣ : Yes but horizontally identical value,
+> ❌ : No,
+>
+> Otherwise use the default value or the one dictated per environment.
+
+### Secrets
+
+| Secret | pull-requests-preview | preview | stable | Default/Description |
+| :--------------------- | :-------------------: | :-----: | :----: | :--------------------------------- |
+| DOCKERHUB_USERNAME | ❌ | ✅ | ✅ | Container Registry User |
+| DOCKERHUB_TOKEN | ❌ | ✅ | ✅ | Container Registry Access Token |
+| DOCS_NEXT_CHARTS_TOKEN | ❌ | 1️⃣ | 1️⃣ | DOCS_NEXT_CHARTS_REPO Access Token |
+| TYPESENSE_API_KEY | ❌ | ✅ | ✅ | TypeSense Admin API Key |
+| TYPESENSE_SEARCH_KEY | ❌ | ✅ | ✅ | TypeSense Search API Key |
+
+> [!NOTE]
+> ✅ : Yes,
+> 1️⃣ : Yes but horizontally identical value,
+> ❌ : No
+
diff --git a/README.md b/README.md
index 771aa29ac..fb17d3614 100644
--- a/README.md
+++ b/README.md
@@ -111,7 +111,7 @@ npm run serve
## Deployment
You can deploy docs-next in a various infrastructure (as every React/TS application). You can just spin a docker container, or deploy it directly on
-an ECS Server or on a CCE Kubernetes Cluster (recommended). Check the architecture and provided Helm Charts for the latter at [Open Telekom Cloud Architecture Center Helm Charts](https://github.com/akyriako/docs-next-charts) repository.
+an ECS Server or on a CCE Kubernetes Cluster (recommended). Check the architecture and provided Helm Charts for the latter at [Open Telekom Cloud Architecture Center Helm Charts](https://github.com/opentelekomcloud-infra/docs-next-charts) repository.
### Manual
@@ -136,7 +136,7 @@ The repository is already employed with a GitHub Release Workflow that will do t
1. Builds the application for production (`npm run build`)
2. Builds and tags a container image and push the image to a predefined docker hub organization
-3. Updates the Helm Charts with new versions and image tags in [Open Telekom Cloud Architecture Center Helm Charts](https://github.com/akyriako/docs-next-charts)
+3. Updates the Helm Charts with new versions and image tags in [Open Telekom Cloud Architecture Center Helm Charts](https://github.com/opentelekomcloud-infra/docs-next-charts)
ArgoCD (deployed on the same CCE Cluster) will pick up the changes, within its `timeout.reconciliation` value (default is *180s*), and provision
the changes without any human intervention.
diff --git a/blog/2019-05-28-first-blog-post.md b/blog/2019-05-28-first-blog-post.md
deleted file mode 100644
index 02f3f81bd..000000000
--- a/blog/2019-05-28-first-blog-post.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-slug: first-blog-post
-title: First Blog Post
-authors:
- name: Gao Wei
- title: Docusaurus Core Team
- url: https://github.com/wgao19
- image_url: https://github.com/wgao19.png
-tags: [hola, docusaurus]
----
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
diff --git a/blog/2019-05-29-long-blog-post.md b/blog/2019-05-29-long-blog-post.md
deleted file mode 100644
index 26ffb1b1f..000000000
--- a/blog/2019-05-29-long-blog-post.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-slug: long-blog-post
-title: Long Blog Post
-authors: endi
-tags: [hello, docusaurus]
----
-
-This is the summary of a very long blog post,
-
-Use a `` comment to limit blog post size in the list view.
-
-
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
diff --git a/blog/2021-08-01-mdx-blog-post.mdx b/blog/2021-08-01-mdx-blog-post.mdx
deleted file mode 100644
index c04ebe323..000000000
--- a/blog/2021-08-01-mdx-blog-post.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
----
-slug: mdx-blog-post
-title: MDX Blog Post
-authors: [slorber]
-tags: [docusaurus]
----
-
-Blog posts support [Docusaurus Markdown features](https://docusaurus.io/docs/markdown-features), such as [MDX](https://mdxjs.com/).
-
-:::tip
-
-Use the power of React to create interactive blog posts.
-
-```js
-
-```
-
-
-
-:::
diff --git a/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg b/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg
deleted file mode 100644
index 11bda0928..000000000
Binary files a/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg and /dev/null differ
diff --git a/blog/2021-08-26-welcome/index.md b/blog/2021-08-26-welcome/index.md
deleted file mode 100644
index 9455168f1..000000000
--- a/blog/2021-08-26-welcome/index.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-slug: welcome
-title: Welcome
-authors: [slorber, yangshun]
-tags: [facebook, hello, docusaurus]
----
-
-[Docusaurus blogging features](https://docusaurus.io/docs/blog) are powered by the [blog plugin](https://docusaurus.io/docs/api/plugins/@docusaurus/plugin-content-blog).
-
-Simply add Markdown files (or folders) to the `blog` directory.
-
-Regular blog authors can be added to `authors.yml`.
-
-The blog post date can be extracted from filenames, such as:
-
-- `2019-05-30-welcome.md`
-- `2019-05-30-welcome/index.md`
-
-A blog post folder can be convenient to co-locate blog post images:
-
-![Docusaurus Plushie](./docusaurus-plushie-banner.jpeg)
-
-The blog supports tags as well!
-
-**And if you don't want a blog**: just delete this directory, and use `blog: false` in your Docusaurus config.
diff --git a/blog/authors.yml b/blog/authors.yml
deleted file mode 100644
index bcb299156..000000000
--- a/blog/authors.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-endi:
- name: Endilie Yacop Sucipto
- title: Maintainer of Docusaurus
- url: https://github.com/endiliey
- image_url: https://github.com/endiliey.png
-
-yangshun:
- name: Yangshun Tay
- title: Front End Engineer @ Facebook
- url: https://github.com/yangshun
- image_url: https://github.com/yangshun.png
-
-slorber:
- name: Sébastien Lorber
- title: Docusaurus maintainer
- url: https://sebastienlorber.com
- image_url: https://github.com/slorber.png
diff --git a/docs/best-practices/computing/image-management-service/creating-a-linux-Image-using-virtualBox-and-an-iso-file.md b/docs/best-practices/computing/image-management-service/creating-a-linux-Image-using-virtualBox-and-an-iso-file.md
deleted file mode 100644
index 0bfcc021f..000000000
--- a/docs/best-practices/computing/image-management-service/creating-a-linux-Image-using-virtualBox-and-an-iso-file.md
+++ /dev/null
@@ -1,1287 +0,0 @@
----
-id: creating-a-linux-Image-using-virtualBox-and-an-iso-file
-title: Creating a Linux Image Using VirtualBox and an ISO File
-tags: [ims, migration]
----
-
-# Creating a Linux Image Using VirtualBox and an ISO File
-
-Introduction
-------------
-
-#### VirtualBox
-
-VirtualBox is free, open-source virtualization software. It was first offered by InnoTek GmbH from Germany and re-branded as Oracle VM VirtualBox when InnoTek was acquired by Oracle Corporation.
-
-For more information about VirtualBox, visit the Oracle official website. Click [here](https://www.virtualbox.org/wiki/Guest_OSes) to see the guest OSs that can work with VirtualBox.
-
-#### Scenarios
-
-You can use a 32-bit or 64-bit Linux guest OS provided by VirtualBox to create an image file in VHD format.
-
-#### Advantages
-
-You can customize Linux image files.
-
-#### Tools and Costs
-
-#### Image Creation Process
-
-The following figure shows how to use VirtualBox to create an image from an ISO file.
-
-**Figure 1** Image creation process
-![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0200645302.png)
-
-Step 1: Installing VirtualBox
------------------------------
-
-#### Preparations
-
-The host where VirtualBox is to be installed must meet the following requirements:
-
-* A 64-bit Windows OS (recommended).
-* At least 4 GB of memory and a dual-core processor. For example, the host specifications can be 8U16G.
-* At least 20 GB of available disk space.
-* Hardware virtualization (Intel VT-x or AMD-V). For how to enable this, see [Host CPU Settings (Hardware Virtualization)](#ims_bp_0017__section1503794314311).
-
-#### Host CPU Settings (Hardware Virtualization)
-
-For an Intel host, perform the following operations to enable hardware virtualization:
-
-![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
-The operations may differ depending on the CPU type. You can do it as prompted.
-
-1. During the host startup, press the BIOS key set by the manufacturer to access the BIOS.
-2. Choose **Configuration** > **Intel Virtual Technology**, and press **Enter**.
-3. Select **Enabled** and press **Enter**. The value of **Intel Virtual Technology** will become **Enabled**.
-4. Press **F10** to save the settings and exit.
-
- **Figure 1** Enabling hardware virtualization
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0107215471.png)
-
-
-#### Procedure
-
-1. Download the VirtualBox installation package. VirtualBox-5.2.0 is used as an example.
-
- Download it from [https://www.virtualbox.org/wiki/Downloads](https://www.virtualbox.org/wiki/Downloads).
-
-2. Decompress the package. Right-click **VirtualBox-5.2.0-118431-Win.exe**, choose **Run as administrator**, and click **Next**.
-
- **Figure 2** Installing VirtualBox
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0107215473.png)
-
-3. Select the VirtualBox installation path and click **Next**.
-
- **Figure 3** Selecting an installation path
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0107215475.png)
-
-4. Personalize the settings and click **Next**.
-
- **Figure 4** Personalized settings
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0107215477.png)
-
-5. Click **Finish**.
-
-
-Step 2: Creating a VM and Installing an OS
-------------------------------------------
-
-## Creating an Empty VM
-
-#### Prerequisites
-
-VirtualBox has been installed.
-
-#### Procedure
-
-1. Open VirtualBox and click **New**. In the displayed **Create Virtual Machine** dialog box, enter a VM name, select an OS type and version, and click **Next**.
-
- Take Ubuntu as an example. The type must be **Linux**.
-
- Ensure that the selected version is the same as that of the OS you want to install on the VM.
-
- **Figure 1** Creating a VM
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0268280658.png)
-
-2. In the **Memory size** dialog box, set a value and click **Next**.
-
- You can reference the VM specifications or official OS requirements. The minimum value is 256 MB. You can set the memory size to 512 MB as an example.
-
- **Figure 2** Setting the memory size
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0268284967.png)
-
-3. In the **Hard disk** dialog box, select **Create a virtual hard disk now** and click **Create**.
-
- **Figure 3** Creating a virtual hard disk
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0268287010.png)
-
-4. In the **Hard disk file type** dialog box, select **VHD** and click **Next**.
-
- **Figure 4** Setting the hard disk file type
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0268287244.png)
-
-5. In the **Storage on physical hard disk** dialog box, select **Dynamically allocated** and click **Next**.
-
- **Figure 5** Selecting the disk allocation mode
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0268288436.png)
-
-6. In the **File location and size** dialog box, set the disk size and storage location.
-
- For example, you can set the disk size to 20 GB.
-
- **Figure 6** Setting the disk location and size
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0268290676.png)
-
-7. Click **Create**.
-
-## Installing a Linux OS on the VM
-
-The procedure varies depending on the image file you use. This section uses Ubuntu 20.04 as an example to describe how to install a Linux OS on the VM.
-
-#### Prerequisites
-
-You have obtained the ISO image file, for example, **Ubuntu-20.04-server.iso**.
-
-#### Procedure
-
-Use the ISO file to install Linux for the empty VM.
-
-1. In VirtualBox Manager, select the new VM and click **Settings**.
-
- **Figure 1** Setting the VM
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0268393798.png)
-
-2. Choose **Storage** > **Empty**, click ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0268393752.png) in the **Attributes** area, and select the ISO image file **Ubuntu-20.04-server.iso**.
-
- **Figure 2** Selecting the ISO file to be mounted
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0268393846.png)
-
- **Figure 3** Mounted ISO file
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001457709502.png)
-
-3. Click **OK**.
-4. In VirtualBox Manager, select the new VM and click **Start**.
-
- **Figure 4** Starting the VM
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0268337032.png)
-
-5. Install the OS.
- 1. Select **English** and press **Enter**.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001514305585.png)
-
- 2. Select **Continue without updating**.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001464149800.png)
-
- 3. Retain the default settings for the keyboard. Select **Done**
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001514311097.png)
-
- 4. Retain the default settings for the installation base. Select **Done**.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001514670405.png)
-
- 5. Retain the default settings for the network. Select **Done**.
-
- The installation program will automatically identify the IP address. If the network cannot be found, the installation program can still continue and you can configure the network again after the installation is complete.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001463836772.png)
-
- 6. Retain the default settings for the proxy. Select **Done**.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001514558429.png)
-
- 7. Retain the default settings for the software source. Select **Done**.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001463840256.png)
-
- 8. Retain the default settings for disk partitioning (use an entire disk and set up this disk as an LVM group). Select **Done**.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001514561077.png)
-
- The file system information will be displayed. Check it and select **Done**.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001463841764.png)
-
- Confirm the destructive action and select **Continue**.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001514681605.png)
-
- 9. Configure the server name, username, and password. Select **Done**.
-
- **Your name**: It is not a username for logging in to the server. You can consider it as server description.
-
- **Your Server's name**: It is a unique server name on the same network. The name cannot contain uppercase letters.
-
- **Pick a username**: It is a username for logging in to the server. If you forget it or its password, you will not be allowed to log in to the server.
-
- **Choose a password**: It is the password for logging in to the server.
-
- **Confirm your password**: Enter your password again.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001464161876.png)
-
- 10. Install SSH so that you can remotely connect to the Linux server.
-
- Select **Install OpenSSH server**. Then, press **Tab** to select **Done**.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001514587325.png)
-
- 11. Select **Done** to start the OS installation.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001464002560.png)
-
- 12. After the installation is complete, select **Reboot** to restart the system.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001464031938.png)
-
-
-
-Step 3: Configuring the VM
---------------------------
-
-## Installing Drivers and Changing the Disk Identifiers to the UUID Format
-
-To ensure that the ECSs created from the image support both Xen and KVM virtualization, install Native Xen and KVM drivers and change the disk identifiers to the UUID format for the VM which is used as the image source.
-
-This section describes how to perform these operations on a Linux VM that runs Ubuntu 20.04. For other OSs, see [Optimization Process (Linux)](https://support.huaweicloud.com/intl/en-us/usermanual-ims/en-us_topic_0047501133.html).
-
-#### Install Native Xen and KVM Drivers
-
-1. Run the following command to open the **modules** file:
-
- **vi /etc/initramfs-tools/modules**
-
-2. Press **i** to enter the editing mode and add the native Xen (xen-pv) and KVM (virtio) drivers to the **/etc/initramfs-tools/modules** file (the format depends on the OS requirements).
-
- ``
-[root@CTU10000xxxxx ~]#vi /etc/initramfs-tools/modules
-...
-# Examples:
-#
-# raid1
-# sd_mOd
-xen-blkfront
-xen-netfront
-virtio_blk
-virtio_scsi
-virtio_net
-virtio_pci
-virtio_ring
-virtio
-``
-
-
-3. Press **Esc**, enter **:wq**, and press **Enter** to save the settings and exit the vi editor.
-4. Run the following command to generate initrd again:
-
- **update-initramfs -u**
-
-5. Run the following commands to check whether native Xen and KVM drivers have been installed:
-
- **lsinitramfs /boot/initrd.img-\`uname -r\` |grep xen**
-
- **lsinitramfs /boot/initrd.img-\`uname -r\` |grep virtio**
-
- ``
-[root@ CTU10000xxxxx home]# lsinitramfs /boot/initrd.img-`uname -r` |grep xen
-lib/modules/3.5.0-23-generic/kernel/drivers/net/ethernet/qlogic/netxen
-lib/modules/3.5.0-23-generic/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic.ko
-lib/modules/3.5.0-23-generic/kernel/drivers/net/xen-netback
-lib/modules/3.5.0-23-generic/kernel/drivers/net/xen-netback/xen-netback.ko
-lib/modules/3.5.0-23-generic/kernel/drivers/block/xen-blkback
-lib/modules/3.5.0-23-generic/kernel/drivers/block/xen-blkback/xen-blkback.ko
-
-[root@ CTU10000xxxxx home]# lsinitramfs /boot/initrd.img-`uname -r` |grep virtio
-lib/modules/3.5.0-23-generic/kernel/drivers/scsi/virtio_scsi.ko
-``
-
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
- If you add built-in drivers to the initrd or initramfs file, the VM will not be affected. This makes it easy to modify the drivers. However, the drivers cannot be shown by running the **lsinitrd** command. You can run the following commands to check whether the drivers are built-in ones in the kernel:
-
- ``
-[root@ CTU10000xxxxx home]# cat /boot/config-`uname -r` | grep CONFIG_VIRTIO | grep y
-CONFIG_VIRTIO_BLK=y
-CONFIG_VIRTIO_NET=y
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_RING=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-[root@ CTU10000xxxxx home]# cat /boot/config-`uname -r` | grep CONFIG_XEN | grep y
-CONFIG_XEN_BLKDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_FRONTEND=y
-``
-
-
-
-#### Change the Disk Identifier in the GRUB Configuration File to the UUID Format
-
-Take Ubuntu 20.04 as an example. Run **blkid** to obtain the UUID of the root partition. Modify the **/boot/grub/grub.cfg** file and use the UUID of the root partition to configure the boot item. If the root partition already uses UUID, no modification is required. The procedure is as follows:
-
-1. Log in to the newly created VM as user **root**.
-2. Run the following command to query all types of mounted file systems and their device UUIDs:
-
- **blkid**
-
- The following information is displayed:
-
- ``
-/dev/xvda1: UUID="ec51d860-34bf-4374-ad46-a0c3e337fd34" TYPE="ext3"
-/dev/xvda5: UUID="7a44a9ce-9281-4740-b95f-c8de33ae5c11" TYPE="swap"
-``
-
-
-
-3. Run the following command to query the **grub.cfg** file:
-
- **cat /boot/grub/grub.****cfg**
-
- The following information is displayed:
-
- ``
-......menuentry 'Ubuntu Linux, with Linux 3.13.0-24-generic' --class ubuntu --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.13.0-24-generic-advanced-ec51d860-34bf-4374-ad46-a0c3e337fd34' {
-recordfail
-load_video
-gfxmode $linux_gfx_mode
-insmod gzio
-insmod part_msdos
-insmod ext2
-if [ x$feature_platform_search_hint = xy ]; then
-search --no-floppy --fs-uuid --set=root ec51d860-34bf-4374-ad46-a0c3e337fd34
-else
-search --no-floppy --fs-uuid --set=root ec51d860-34bf-4374-ad46-a0c3e337fd34
-fi
-echo 'Loading Linux 3.13.0-24-generic ...'
-linux /boot/vmlinuz-3.13.0-24-generic root=/dev/xvda1 ro
-echo 'Loading initial ramdisk ...'
-initrd /boot/initrd.img-3.13.0-24-generic
-}
-``
-
-
-
-4. Check whether the **/boot/grub/grub.cfg** configuration file contains **root=/dev/xvda1** or **root=UUID=ec51d860-34bf-4374-ad46-a0c3e337fd34**.
- * If **root=UUID=ec51d860-34bf-4374-ad46-a0c3e337fd34** is contained, the root partition is in the UUID format and no further action is required.
- * If **root=/dev/xvda1** is contained, the root partition is represented by a device name. Go to step [5](#ims_bp_0022__en-us_topic_0106036281_lf7085be4afa540f3b52640d8d9157c9a).
-
-5. Obtain the UUID of the root partition based on **root=/dev/xvda1** and information obtained by running the **blkid** command.
-6. Run the following command to open the **grub.cfg** file:
-
- **vi /boot/grub/grub.cfg**
-
-7. Press **i** to enter the editing mode. Change the identifier of the root partition to the UUID format. For example, change **root=/dev/xvda1** to **root=UUID=ec51d860-34bf-4374-ad46-a0c3e337fd34**.
-8. Press **Esc**, enter **:wq**, and press **Enter** to save the settings and exit the vi editor.
-9. Run the following command to verify the change:
-
- **cat /boot/grub/grub.****cfg**
-
- The change is successful if information similar to the following is displayed:
-
- ``
-......menuentry 'Ubuntu Linux, with Linux 3.13.0-24-generic' --class ubuntu --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.13.0-24-generic-advanced-ec51d860-34bf-4374-ad46-a0c3e337fd34' {
-recordfail
-load_video
-gfxmode $linux_gfx_mode
-insmod gzio
-insmod part_msdos
-insmod ext2
-if [ x$feature_platform_search_hint = xy ]; then
-search --no-floppy --fs-uuid --set=root ec51d860-34bf-4374-ad46-a0c3e337fd34
-else
-search --no-floppy --fs-uuid --set=root ec51d860-34bf-4374-ad46-a0c3e337fd34
-fi
-echo 'Loading Linux 3.13.0-24-generic ...'
-linux /boot/vmlinuz-3.13.0-24-generic root=UUID=ec51d860-34bf-4374-ad46-a0c3e337fd34 ro
-echo 'Loading initial ramdisk ...'
-initrd /boot/initrd.img-3.13.0-24-generic
-}
-``
-
-
-
-#### Change the Disk Identifiers in the fstab File to the UUID Format
-
-Take Ubuntu 20.04 as an example. Run **blkid** to obtain the UUIDs of all partitions. Modify the **/etc/fstab** file and use the partition UUIDs to configure automatic partition mounting.
-
-1. Run the following command to query all types of mounted file systems and their device UUIDs:
-
- **blkid**
-
- The following information is displayed:
-
- ``
-/dev/xvda2: UUID="4eb40294-4c6f-4384-bbb6-b8795bbb1130" TYPE="xfs"
-/dev/xvda1: UUID="2de37c6b-2648-43b4-a4f5-40162154e135" TYPE="swap"
-``
-
-
-2. Run the following command to query the **fstab** file:
-
- **cat /etc/fstab**
-
- The following information is displayed:
-
- ``
-[root@CTU1000028010 ~]# cat /etc/fstab
-/dev/xvda2 / xfs defaults 0 0
-/dev/xvda1 swap swap defaults 0 0
-``
-
-
-3. Check whether the disk identifiers in the **fstab** file are device names or UUIDs.
- * If they are UUIDs, no further action is required.
- * If they are device names, go to step [4](#ims_bp_0022__en-us_topic_0106036281_li63646666154817).
-4. Run the following command to open the **fstab** file:
-
- **vi /etc/fstab**
-
-5. Press **i** to enter the editing mode and change the disk identifiers to the UUID format.
-6. Press **Esc**, enter **:wq**, and press **Enter** to save the settings and exit the vi editor.
-
-## Installing Cloud-Init
-#### Scenarios
-
-To ensure that you can use the user data injection function to inject initial custom information into ECSs created from a private image (such as setting the ECS login password), install Cloud-Init on the ECS used to create the image.
-
-* You need to download Cloud-Init from its official website. Therefore, you must bind an EIP to the ECS.
-* If Cloud-Init is not installed, you cannot configure an ECS. As a result, you can only use the password in the image file to log in to the created ECSs.
-* By default, ECSs created from a public image have Cloud-Init installed. You do not need to install or configure Cloud-Init on such ECSs.
-* For ECSs created using an external image file, install and configure Cloud-Init by performing the operations in this section. For how to configure Cloud-Init, see [Configuring Cloud-Init](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/ims_bp_0024.html#ims_bp_0024).
-
-![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
-Cloud-Init is open-source software. If the installed version has security vulnerabilities, you are advised to upgrade it to the latest version.
-
-#### Prerequisites
-
-* An EIP has been bound to the ECS.
-* You have logged in to the ECS.
-* The IP address obtaining mode of the ECS is DHCP.
-
-#### Procedure
-
-1. Check whether Cloud-Init has been installed.
-
- For details, see [Check Whether Cloud-Init Has Been Installed](#ims_bp_0023__en-us_topic_0030730603_section57525650153449).
-
-2. Install Cloud-Init.
-
- You can install Cloud-Init in any of the following ways: [(Recommended) Install Cloud-Init Using the Official Installation Package](#ims_bp_0023__en-us_topic_0030730603_section9013470154018), [Install Cloud-Init Using the Official Source Code Package and pip](#ims_bp_0023__en-us_topic_0030730603_section124220553610), and [Install Cloud-Init Using the Official GitHub Source Code](#ims_bp_0023__en-us_topic_0030730603_section14939636151511).
-
-
-#### Check Whether Cloud-Init Has Been Installed
-
-Perform the operations provided here to check whether Cloud-Init has been installed. The methods of checking whether Cloud-Init is installed vary depending on the OSs.
-
-* If you are in a Python 3 environment, run the following command to check whether Cloud-Init is installed (Ubuntu 22.0.4 is used as an example):
-
- **which cloud-init**
-
- * If information similar to the following is displayed, Cloud-Init has been installed:
-
- ``
-/usr/bin/cloud-init
-``
-
-
- * If information similar to the following is displayed, Cloud-Init is not installed:
-
- ``
-/usr/bin/which: no cloud-init in (/usr/local/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin)
-``
-
-
-* If you are in a Python 2 environment, run the following command to check whether Cloud-Init is installed (CentOS 6 is used as an example):
-
- **which cloud-init**
-
- * If information similar to the following is displayed, Cloud-Init has been installed:
-
- ``
-cloud-init-0.7.5-10.el6.centos.2.x86_64
-``
-
-
- * If no information is returned, Cloud-Init is not installed.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
- To confirm Cloud-Init is really not installed, you are advised to run **rpm -qa |grep cloud-init** to check again. If either of **which cloud-init** and **rpm -qa |grep cloud-init** shows that Cloud-Init has been installed, Cloud-Init is installed.
-
-
-If Cloud-Init has been installed, perform the following operations:
-
-* Check whether to use the SSH certificate in the ECS OS. If the certificate is no longer used, delete it.
- * If the certificate is stored in a directory of user **root**, for example, _/$path/$to/$root_**/.ssh/authorized\_keys**, run the following commands:
-
- **cd /root/.ssh**
-
- **rm authorized\_keys**
-
- * If the certificate is not stored in a directory of user **root**, for example, _/$path/$to/$none-root_**/.ssh/authorized\_keys**, run the following commands:
-
- **cd /home/centos/.ssh**
-
- **rm authorized\_keys**
-
-* Run the following command to delete the cache generated by Cloud-Init and ensure that the ECS created from the private image can be logged in by using the certificate:
-
- **sudo rm -rf /var/lib/cloud/\***
-
-
-![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
-Do not restart the ECS after performing the configuration. Otherwise, you need to configure it again.
-
-#### (Recommended) Install Cloud-Init Using the Official Installation Package
-
-The method of installing Cloud-Init on an ECS varies depending on the OS. Perform the installation operations as user **root**.
-
-The following describes how to install Cloud-Init on an ECS running SUSE Linux, CentOS, Fedora, Debian, and Ubuntu. For other OS types, install the required type of Cloud-Init. For example, you need to install coreos-cloudinit on ECSs running CoreOS.
-
-* SUSE Linux
-
- Paths for obtaining the Cloud-Init installation package for SUSE Linux
-
- [https://ftp5.gwdg.de/pub/opensuse/repositories/Cloud:/Tools/](https://ftp5.gwdg.de/pub/opensuse/repositories/Cloud:/Tools)
-
- [http://download.opensuse.org/repositories/Cloud:/Tools/](http://download.opensuse.org/repositories/Cloud:/Tools/)
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
- Select the required repo installation package in the provided paths.
-
- Take SUSE Enterprise Linux Server 12 as an example. Perform the following steps to install Cloud-Init:
-
- 1. Log in to the ECS used to create a Linux private image.
- 2. Run the following command to install the network installation source for SUSE Enterprise Linux Server 12:
-
- **zypper ar https://ftp5.gwdg.de/pub/opensuse/repositories/Cloud:/Tools/SLE\_12\_SP3/Cloud:Tools.repo**
-
- 3. Run the following command to update the network installation source:
-
- **zypper refresh**
-
- 4. Run the following command to install Cloud-Init:
-
- **zypper install cloud-init**
-
- 5. Run the following commands to enable Cloud-Init to automatically start upon system boot:
-
- * SUSE 11
-
- **chkconfig cloud-init-local on; chkconfig cloud-init on; chkconfig cloud-config on; chkconfig cloud-final on**
-
- **service cloud-init-local status; service cloud-init status; service cloud-config status; service cloud-final status**
-
- * SUSE 12 and openSUSE 12/13/42
-
- **systemctl enable cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
- **systemctl status cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/caution_3.0-en-us.png)
-
- For SUSE and openSUSE, perform the following steps to disable dynamic change of the ECS name:
-
- 1. Run the following command to open the **dhcp** file using the vi editor:
-
- **vi** **etc/sysconfig/network/dhcp**
-
- 2. Change the value of **DHCLIENT\_SET\_HOSTNAME** in the **dhcp** file to **no**.
-
-* **CentOS**
-
- [Table 1](#ims_bp_0023__en-us_topic_0030730603_table859383892814) lists the Cloud-Init installation paths for CentOS. Select the required installation package from the following addresses.
-
- 1. Run the following commands to install Cloud-Init:
-
- **yum install** _Cloud-Init installation package address_**/epel-release-**_x-y_**.noarch.rpm**
-
- **yum install cloud-init**
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
- _Cloud-Init installation package address_ indicates the address of the Cloud-Init epel-release installation package, and _x-y_ indicates the version of the Cloud-Init epel-release required by the current OS. Replace them with the actual values according to [Table 1](#ims_bp_0023__en-us_topic_0030730603_table859383892814).
-
- * Take CentOS 6 64-bit as an example. If the version is 6.8, the command is as follows:
-
- **yum install https://archives.fedoraproject.org/pub/archive/epel/6/x86\_64/epel-release-****6-8****.noarch.rpm**
-
- * Take CentOS 7 64-bit as an example. If the version is 7.14, the command is as follows:
-
- **yum install https://archives.fedoraproject.org/pub/epel/7/x86\_64/Packages/e/epel-release-****7-14****.noarch.rpm**
-
-
- 2. Run the following commands to enable Cloud-Init to automatically start upon system boot:
-
- **systemctl enable cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
- **systemctl status cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
-* Fedora
-
- Before installing Cloud-Init, ensure that the network installation source address has been configured for the OS by checking whether the **/etc/yum.repo.d/fedora.repo** file contains the installation source address of the software package. If the file does not contain the address, configure the address by following the instructions on the Fedora official website.
-
- 1. Run the following command to install Cloud-Init:
-
- **yum install cloud-init**
-
- 2. Run the following commands to enable Cloud-Init to automatically start upon system boot:
-
- **systemctl enable cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
- **systemctl status cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
-* Debian and Ubuntu
-
- Before installing Cloud-Init, ensure that the network installation source address has been configured for the OS by checking whether the **/etc/apt/sources.list** file contains the installation source address of the software package. If the file does not contain the address, configure the address by following the instructions on the Debian or Ubuntu official website.
-
- 1. Run the following commands to install Cloud-Init:
-
- **apt-get update**
-
- **apt-get install** **cloud-init**
-
- 2. Run the following commands to enable Cloud-Init to automatically start upon system boot:
-
- **systemctl enable cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
- **systemctl status cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
-
- **Cloud-Init-23.2.2 is used as an example to describe how to install Cloud-Init on CentOS, Fedora, Ubuntu, Debian, and SUSE.**
-
- Download the **cloud-init-23.2.2.tar.gz** source code package from [https://launchpad.net/cloud-init/trunk/23.2.2/+download/cloud-init-23.2.2.tar.gz](https://launchpad.net/cloud-init/trunk/23.2.2/+download/cloud-init-23.2.2.tar.gz).
-
-* **Centos 7/Fedora Server 36**
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/notice_3.0-en-us.png)
-
- Ensure that Python 3 has been installed.
-
- 1. Check whether Cloud-Init has been installed. If any command output is displayed, Cloud-Init has been installed.
-
- ``
-cloud-init -v
-``
-
-
- 2. Delete the cache directory of Cloud-Init.
-
- ``
-rm -rf /var/lib/cloud/*
-``
-
-
- 3. Install dependency packages of Cloud-Init.
-
- ``
-yum install python3-pip -y
-yum install python3-devel
-``
-
-
- 4. Download the Cloud-Init package.
-
- ``
-wget https://launchpad.net/cloud-init/trunk/23.2.2/+download/cloud-init-23.2.2.tar.gz
-``
-
-
- 5. Decompress the Cloud-Init package.
-
- ``
-tar -zxvf cloud-init-23.2.2.tar.gz
-``
-
-
- 6. Go to the **cloud-init-23.2.2** directory and install dependent libraries:
-
- ``
-cd cloud-init-23.2.2
-pip3 install -r requirements.txt
-``
-
-
- 7. Install Cloud-Init.
-
- ``
-python3 setup.py build
-python3 setup.py install --init-system systemd
-``
-
-
- 8. (Optional) Diable Cloud-Init's network configuration capability by modifying the **/etc/cloud/cloud.cfg** file.
-
- ``
-vi /etc/cloud/cloud.cfg
-``
-
-
- Add the following content to the file:
-
- ``
-network:
- config: disabled
-``
-
-
- 9. Restart Cloud-Init and check its status.
-
- ``
-systemctl restart cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service
-systemctl status cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service
-``
-
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001688060529.png)
-
- 10. Enable Cloud-Init related services to automatically start upon system boot.
-
- ``
-systemctl enable cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service
-``
-
-
- 11. Check whether Cloud-Init is running properly.
-
- ``
-cloud-init -v
-cloud-init init --local
-``
-
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001639899816.png)
-
-* **Ubuntu 22.0.4/Debian 11**
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/notice_3.0-en-us.png)
-
- Ensure that Python 3 has been installed.
-
- 1. Check and delete redundant Cloud-Init configuration files.
-
- ``
-rm -rf /var/lib/cloud/*
-rm -f /var/log/cloud-init*
-``
-
-
- Delete all files except log-related configuration files from the **/etc/cloud/cloud.cfg.d/** directory.
-
- 2. Update your package list and check whether Wget is installed. If it is not, install it.
-
- ``
-sudo apt update
-sudo apt install wget
-``
-
-
- 3. Install dependency packages.
-
- ``
-apt-get install cloud-guest-utils -y
-apt-get install python3-pip -y
-apt-get install python3-devel
-``
-
-
- 4. Download the Cloud-Init package.
-
- ``
-wget https://launchpad.net/cloud-init/trunk/23.2.2/+download/cloud-init-23.2.2.tar.gz
-``
-
-
- 5. Decompress the Cloud-Init package.
-
- ``
-tar -zxvf cloud-init-23.2.2.tar.gz
-``
-
-
- 6. Go to the **cloud-init** directory.
-
- ``
-cd cloud-init-23.2.2
-``
-
-
- 7. Install dependent libraries.
-
- ``
-pip3 install -r requirements.txt
-``
-
-
- 8. Install Cloud-Init.
-
- ``
-python3 setup.py install
-``
-
-
- 9. (Optional) Disable Cloud-Init's network configuration capability.
-
- You need to do so when the Cloud-Init version is 0.7.9 or later and you want to configure the network.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
- 1\. Open the **/etc/cloud/cloud.cfg** file.
-
- ``
-vi /etc/cloud/cloud.cfg
-``
-
-
- 2\. Enter **i** and configure **network**. (If there is no such a configuration item, add it.)
-
- ``
-network:
- config: disabled
-``
-
-
- 10. Restart Cloud-Init and check its status.
-
- ``
-systemctl restart cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service
-systemctl status cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service
-``
-
-
- 11. Enable Cloud-Init related services to automatically start upon system boot.
-
- ``
-systemctl enable cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service
-``
-
-
- 12. Check whether Cloud-Init is running properly.
-
- ``
-cloud-init -v
-cloud-init init --local
-``
-
-
-* **SUSE Enterprise Linux Server 15**
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/notice_3.0-en-us.png)
-
- Ensure that Python 3 has been installed.
-
- 1. View existing SUSE repositories.
-
- ``
-zypper lr
-``
-
-
- 2. Delete the SUSE repositories.
-
- ``
-zypper rr No. of repositories listed in 1
-``
-
-
- 3. Configure a SUSE repository.
-
- ``
-zypper ar https://ftp5.gwdg.de/pub/opensuse/repositories/Cloud:/Tools/SLE_15_SP4/Cloud:Tools.repo
-``
-
-
- 4. Refresh the SUSE repository.
-
- ``
-zypper refresh
-``
-
-
- 5. Install Cloud-Init.
-
- ``
-zypper install cloud-init
-``
-
-
- 6. Run **cloud-init -v**. If error messages similar to the following are displayed, install the dependency packages.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001688251829.png)
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001640131918.png)
-
- ``
-pip install requests pyyaml oauthlib jsonschema jsonpatch jinja2 configobj
-``
-
-
- 7. Check whether Cloud-Init is successfully installed. If the following error message is displayed, configure **datasource\_list** in **/etc/cloud/cloud.cfg**.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001639765024.png)
-
- ``
-datasource_list: [ OpenStack ]
-datasource:
- OpenStack:
- metadata_urls: ['http://169.254.169.254']
- max_wait: 120
- timeout: 5
- apply_network_config: false
-``
-
-
- 8. Modify the configuration file, restart Cloud-Init, and check the Cloud-Init status.
-
- ``
-systemctl restart cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service
-systemctl status cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service
-``
-
-
- 9. Enable Cloud-Init related services to automatically start upon system boot.
-
- ``
-systemctl enable cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service
-``
-
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001688204517.png)
-
- 10. Check whether Cloud-Init is running properly.
-
- ``
-cloud-init -v
-cloud-init init --local
-``
-
-
-
-#### Install Cloud-Init Using the Official Source Code Package and pip
-
-The following operations use Cloud-Init 0.7.9 as an example to describe how to install Cloud-Init.
-
-1. Download the **cloud-init-0.7.9.tar.gz** source code package (version 0.7.9 is recommended) and upload it to the **/home/** directory of the ECS.
-
- Download **cloud-init-0.7.9.tar.gz** from the following path:
-
- [https://launchpad.net/cloud-init/trunk/0.7.9/+download/cloud-init-0.7.9.tar.gz](https://launchpad.net/cloud-init/trunk/0.7.9/+download/cloud-init-0.7.9.tar.gz)
-
-2. Create a **pip.conf** file in the **~/.pip/** directory and edit the following content:
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
- If the **~/.pip/** directory does not exist, run the **mkdir ~/.pip** command to create it.
-
- ``
-[global]
-index-url = https://<$mirror>/simple/
-trusted-host = <$mirror>
-``
-
-
-3. Run the following command to install the downloaded Cloud-Init source code package (select **\--upgrade** as needed during installation):
-
- **pip install \[--upgrade\] /home/cloud-init-0.7.9.tar.gz**
-
-4. Run the **cloud-init -v** command. Cloud-Init is installed successfully if the following information is displayed:
-
- ``
-cloud-init 0.7.9
-``
-
-
-5. Enable Cloud-Init to automatically start upon system boot.
- * If the OS uses SysVinit to manage automatic start of services, run the following commands:
-
- **chkconfig --add cloud-init-local; chkconfig --add cloud-init; chkconfig --add cloud-config; chkconfig --add cloud-final**
-
- **chkconfig cloud-init-local on; chkconfig cloud-init on; chkconfig cloud-config on; chkconfig cloud-final on**
-
- **service cloud-init-local status; service cloud-init status; service cloud-config status; service cloud-final status**
-
- * If the OS uses Systemd to manage automatic start of services, run the following commands:
-
- **systemctl enable cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
- **systemctl status cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
-
-![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/caution_3.0-en-us.png)
-
-If you install Cloud-Init using the official source code package and pip, pay attention to the following:
-
-1. Add user **syslog** to the **adm** group during the installation. If user **syslog** exists, add it to the **adm** group. For some OSs (such as CentOS and SUSE), user **syslog** may not exist. Run the following commands to create user **syslog** and add it to the **adm** group:
-
- **useradd syslog**
-
- **groupadd adm**
-
- **usermod -g adm syslog**
-
-2. Change the value of **distro** in **system\_info** in the **/etc/cloud/cloud.cfg** file based on the OS release version, such as **distro: ubuntu**, **distro: sles**, **distro: debian**, and **distro: fedora**.
-
-#### Install Cloud-Init Using the Official GitHub Source Code
-
-You can obtain the Cloud-Init source code from GitHub at [https://github.com/canonical/cloud-init/](https://github.com/canonical/cloud-init/)
-
-1. Run the following commands to download the source code package and copy it to the **/tmp/CLOUD-INIT** folder:
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
- **wget** **https://github.com/canonical/cloud-init/archive/refs/tags/0.7.6.zip**
-
- **mkdir /tmp/CLOUD-INIT**
-
- **cp **cloud-init-0.7.6.zip** /tmp/CLOUD-INIT**
-
- **cd /tmp/CLOUD-INIT**
-
-2. Run the following command to decompress the package:
-
- **unzip **cloud-init-0.7.6.zip****
-
-3. Run the following command to enter the **cloud-init-0.7.6** folder:
-
- **cd **cloud-init**\-0.7.6**
-
-4. (Optional) If the Cloud-Init version is 18.3 to 22.3, run the following commands:
-
- **sed -i '/VALID\_DMI\_ASSET\_TAGS =/a\\VALID\_DMI\_ASSET\_TAGS += \["HUAWEICLOUD"\]' cloudinit/sources/DataSourceOpenStack.py**
-
- **cat cloudinit/sources/DataSourceOpenStack.py | grep VALID\_DMI\_ASSET\_TAGS**
-
- If the following information is displayed, the execution is successful.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/en-us_image_0000001390619817.png)
-
-5. Install Cloud-Init. The commands vary depending on the OS type.
-
- * For CentOS 6.x or SUSE 11.x, run the following commands:
-
- **python setup.py build**
-
- **python setup.py install --init-system sysvinit**
-
- * For CentOS 7.x or SUSE 12.x, run the following commands:
-
- **python setup.py build**
-
- **python setup.py install --init-system systemd**
-
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
- Add user **syslog** to the **adm** group during the installation. If user **syslog** exists, add it to the **adm** group. For some OSs (such as CentOS and SUSE), user **syslog** may not exist. Run the following commands to create user **syslog** and add it to the **adm** group:
-
- **useradd syslog**
-
- **groupadd adm**
-
- **usermod -g adm syslog**
-
-6. Enable Cloud-Init to automatically start upon system boot.
- * If the OS uses SysVinit to manage automatic start of services, run the following commands:
-
- **chkconfig --add cloud-init-local; chkconfig --add cloud-init; chkconfig --add cloud-config; chkconfig --add cloud-final**
-
- **chkconfig cloud-init-local on; chkconfig cloud-init on; chkconfig cloud-config on; chkconfig cloud-final on**
-
- **service cloud-init-local status; service cloud-init status; service cloud-config status; service cloud-final status**
-
- * If the OS uses Systemd to manage automatic start of services, run the following commands:
-
- **systemctl enable cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
- **systemctl status cloud-init-local.service cloud-init.service cloud-config.service cloud-final.service**
-
-7. Run the following commands to check whether Cloud-Init has been installed:
-
- **cloud-init -v**
-
- **cloud-init init --local**
-
- Cloud-Init is successfully installed if the following information is displayed:
-
- ``
-cloud-init 0.7.6
-``
-
-## (Optional) Installing the One-Click Password Reset Plug-in_Image Management Service
-
-To ensure that you can reset the password of each ECS created from the image with a few clicks, you are advised to install the one-click password reset plug-in (CloudResetPwdAgent) on the VM which is used as the image source.
-
-#### Procedure
-
-1. Download the CloudResetPwdAgent software package.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
- The one-click password reset plug-in can be automatically updated only if an EIP is bound to the VM.
-
- You can download the **CloudResetPwdAgent.zip** package from:
-
- For 32-bit OSs: [http://ap-southeast-1-cloud-reset-pwd.obs.ap-southeast-1.myhuaweicloud.com/linux/32/reset\_pwd\_agent/CloudResetPwdAgent.zip](http://ap-southeast-1-cloud-reset-pwd.obs.ap-southeast-1.myhuaweicloud.com/linux/32/reset_pwd_agent/CloudResetPwdAgent.zip)
-
- For 64-bit OSs: [http://ap-southeast-1-cloud-reset-pwd.obs.ap-southeast-1.myhuaweicloud.com/linux/64/reset\_pwd\_agent/CloudResetPwdAgent.zip](http://ap-southeast-1-cloud-reset-pwd.obs.ap-southeast-1.myhuaweicloud.com/linux/64/reset_pwd_agent/CloudResetPwdAgent.zip)
-
-2. Run the following command to decompress **CloudResetPwdAgent.zip**:
-
- **unzip -o -d** _Decompressed directory_ **CloudResetPwdAgent.zip**
-
- There is no special requirement for the directory that stores the decompressed **CloudResetPwdAgent.zip**. You can choose a directory as you need. If the directory is **/home/PwdAgent/test**, the command is as follows:
-
- **unzip -o -d /home/PwdAgent/test CloudResetPwdAgent.zip**
-
-3. Install the one-click password reset plug-in.
- 1. Run the following command to open the **CloudResetPwdAgent.Linux** file:
-
- **cd CloudResetPwdAgent/CloudResetPwdAgent.Linux**
-
- 2. Run the following command to add the execute permission for the **setup.sh** file:
-
- **chmod +x setup.sh**
-
- 3. Run the following command to install the plug-in:
-
- **sudo sh setup.sh**
-
- 4. Run the following command to check whether the installation is successful:
-
- **service cloudResetPwdAgent status**
-
- If the status of CloudResetPwdAgent is not **unrecognized service**, the installation is successful. Otherwise, the installation failed.
-
- ![](https://support.huaweicloud.com/intl/en-us/bestpractice-ims/public_sys-resources/note_3.0-en-us.png)
-
- If the installation failed, check whether the installation environment meets requirements and install the plug-in again.
-
-## Configuring NetworkManager
-
-Linux allows you to use NetworkManager to automatically configure the VM network. You are advised to use NetworkManager for new OS versions.
-
-Alternatively, you can use the native network management service of the OS.
-
-#### Red Hat, Oracle, CentOS 6.x, CentOS 7.x, EulerOS 2.x, Fedora 22, or Later
-
-Install NetworkManager and use it for automatic network configuration.
-
-1. Run the following command to install NetworkManager:
-
- **yum install NetworkManager**
-
-2. Delete **ifcfg-eth1** to **ifcfg-eth11** from the **/etc/sysconfig/network-scripts/** directory and retain only **ifcfg-eth0**.
-3. Run the following command to disable the network:
-
- **service network stop**
-
-4. Run the following command to disable automatic startup of the network:
-
- **chkconfig network off**
-
-5. Run the following commands to restart messagebus and NetworkManager and enable NetworkManager to start automatically at startup:
-
- **service messagebus restart**
-
- **service NetworkManager restart**
-
- **chkconfig NetworkManager on**
-
-
-#### Debian 9.0 or Later
-
-Install NetworkManager and use it for automatic network configuration.
-
-1. Run the following command to install NetworkManager:
-
- **apt-get install network-manager**
-
-2. Change the value of **managed** in the **/etc/NetworkManager/NetworkManager.conf** file to **true**.
-3. Modify **/etc/network/interfaces** and retain only **eth0**.
-4. Run the following commands to disable the network, restart messagebus and NetworkManager, and enable NetworkManager to start automatically at startup:
-
- **service network-manager restart**
-
- **chkconfig network-manager on**
-
- **service networking stop**
-
- **service messagebus restart**
-
- **service network-manager restart**
-
-
-#### Ubuntu 14 or Later
-
-Install NetworkManager and use it for automatic network configuration.
-
-1. Run the following command to install NetworkManager:
-
- **apt-get install network-manager**
-
-2. Change the value of **managed** in the **/etc/NetworkManager/NetworkManager.conf** file to **true**.
-3. Modify **/etc/network/interfaces** and retain only **eth0**.
-4. Run the following command to disable the network:
-
- **service networking stop**
-
-5. Run the following command to disable automatic startup of the network:
-
- **chkconfig network off**
-
-6. Run the following commands to restart D-Bus and NetworkManager:
-
- **service dbus restart**
-
- **service network-manager restar****t**
-
-
-#### SUSE 11 SP3 and openSUSE 13 or Later
-
-Install NetworkManager and use it for automatic network configuration.
-
-1. Delete **ifcfg-eth1** to **ifcfg-eth11** from the **/etc/sysconfig/network-scripts/** directory and retain only **ifcfg-eth0**.
-2. Run the following command to install NetworkManager:
-
- **zypper install NetworkManager**
-
-3. Start YaST, choose **Network Devices** in the navigation pane on the left, and select **Network Settings** in the right pane. In the **Network Setup Method** area of the **Global Options** page, change **Traditional Method with ifup** to **User Controlled with NetworkManager**
-
-
-Step 4: Obtaining the Image File_Image Management Service
----------------------------------------------------------
-Updated on 2022-08-29 GMT+08:00
-
-After the VM is configured, perform the following operations to generate and export a Linux image file:
-
-1. Open VirtualBox, select the VM, choose **Settings** > **Storage**, and select **Linux.vhd**.
-
- **Linux** is the VM name.
-
-2. On the right pane, view the image file location.
-3. Go to the location to obtain the generated **Linux.vhd** image file.
-
-Step 5: Registering the Image File as a Private Image
------------------------------------------------------
-Upload the image file to an OBS bucket and register it as a private image.
-
-#### Constraints
-
-* Only an unencrypted image file or an image file encrypted using SSE-KMS can be uploaded to an OBS bucket.
-* When uploading an image file, you must select an OBS bucket with the storage class of Standard.
-
-#### Procedure
-
-1. Use OBS Browser+ to upload the image file. For details, see [OBS Browser+ Best Practices](https://support.huaweicloud.com/intl/en-us/browsertg-obs/obs_03_1006.html).
-
- For how to download OBS Browser+, see [https://support.huaweicloud.com/intl/en-us/browsertg-obs/obs\_03\_1003.html](https://support.huaweicloud.com/intl/en-us/browsertg-obs/obs_03_1003.html).
-
-2. Register the external image file as a private image. For details, see [Registering an Image File as a Private Image (Linux)](https://support.huaweicloud.com/intl/en-us/usermanual-ims/ims_01_0211.html).
\ No newline at end of file
diff --git a/docs/best-practices/computing/image-management-service/creating-a-linux-Image-using-virtualBox-and-an-iso-file.mdx b/docs/best-practices/computing/image-management-service/creating-a-linux-Image-using-virtualBox-and-an-iso-file.mdx
new file mode 100644
index 000000000..44d7fb29b
--- /dev/null
+++ b/docs/best-practices/computing/image-management-service/creating-a-linux-Image-using-virtualBox-and-an-iso-file.mdx
@@ -0,0 +1,560 @@
+---
+id: creating-a-linux-Image-using-virtualBox-and-an-iso-file
+title: Creating a Linux Image Using VirtualBox and an ISO File
+tags: [ims, virtualbox]
+---
+
+# Creating a Linux Image Using VirtualBox and an ISO File
+
+[VirtualBox](https://www.virtualbox.org/) is free, open-source virtualization software. It was first offered by InnoTek GmbH from Germany and re-branded as Oracle VM VirtualBox when InnoTek was acquired by Oracle Corporation.
+
+For more information about VirtualBox, visit the Oracle official website. Click [here](https://www.virtualbox.org/wiki/Guest_OSes) to see the guest OSs that can work with VirtualBox.
+
+You can use a 32-bit or 64-bit Linux guest OS provided by VirtualBox to create an image file in VHD format.
+The following figure shows how to use VirtualBox to create an image from an ISO file.
+
+![**Figure 1** Image creation process](/img/docs/best-practices/computing/image-management-service/en-us_image_0200645302.png)
+
+## Installing VirtualBox
+
+### Prerequisites
+
+The host where VirtualBox is to be installed must meet the following requirements:
+
+* A 64-bit Windows OS (recommended).
+* At least 4 GB of memory and a dual-core processor. For example, the host specifications can be 8U16G.
+* At least 20 GB of available disk space.
+* Hardware virtualization (Intel VT-x or AMD-V). For how to enable this, see [Configuring Host BIOS CPU Settings](#configuring-host-bios-cpu-settings).
+
+:::note
+For details about how to install VirtualBox, see the VirtualBox user guide at
+[https://www.virtualbox.org/manual/UserManual.html](https://www.virtualbox.org/manual/UserManual.html).
+:::
+
+### Configuring Host BIOS CPU Settings
+
+For an **Intel host**, perform the following operations to enable hardware virtualization:
+
+:::warning
+The operations **may differ depending on the CPU type**. You can do it as prompted.
+:::
+
+1. During the host startup, press *the BIOS key set by the manufacturer to access the BIOS*.
+2. Choose *Configuration* -> *Intel Virtual Technology*, and press *Enter*.
+3. Select *Enabled* and press *Enter*. The value of *Intel Virtual Technology* will become *Enabled*.
+4. Press *F10* to save the settings and exit.
+
+ **Figure 1** Enabling hardware virtualization
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0107215471.png)
+
+### Installing VirtualBox Binaries
+
+1. Download the VirtualBox installation package. VirtualBox-5.2.0 is used as an example.
+
+ Download it from [https://www.virtualbox.org/wiki/Downloads](https://www.virtualbox.org/wiki/Downloads).
+
+2. Run the installer. Right-click **VirtualBox-5.2.0-118431-Win.exe**, choose *Run as administrator*, and click *Next*.
+
+ **Figure 2** Installing VirtualBox
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0107215473.png)
+
+3. Select the VirtualBox installation path and click *Next*.
+
+ **Figure 3** Selecting an installation path
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0107215475.png)
+
+4. Personalize the settings and click *Next*.
+
+ **Figure 4** Personalized settings
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0107215477.png)
+
+5. Click *Finish*.
+
+## Creating a VM and Installing an OS
+
+### Creating an Empty VM
+
+1. Open VirtualBox and click *New*. In the displayed *Create Virtual Machine* dialog box, enter a VM name, select an OS type and version, and click *Next*.
+
+ Take Ubuntu as an example. The type must be **Linux**. Ensure that the selected version is the same as that of the OS you want to install on the VM.
+
+ **Figure 1** Creating a VM
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0268280658.png)
+
+2. In the *Memory size* dialog box, set a value and click *Next*.
+
+ You can reference the VM specifications or official OS requirements. The minimum value is 256 MB. You can set the memory size to 512 MB as an example.
+
+ **Figure 2** Setting the memory size
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0268284967.png)
+
+3. In the *Hard disk* dialog box, select *Create a virtual hard disk now* and click *Create*.
+
+ **Figure 3** Creating a virtual hard disk
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0268287010.png)
+
+4. In the *Hard disk file type* dialog box, select *VHD* and click *Next*.
+
+ **Figure 4** Setting the hard disk file type
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0268287244.png)
+
+5. In the *Storage on physical hard disk* dialog box, select *Dynamically allocated* and click *Next*.
+
+ **Figure 5** Selecting the disk allocation mode
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0268288436.png)
+
+6. In the *File location and size* dialog box, set the disk size and storage location.
+
+ For example, you can set the disk size to 20 GB.
+
+ **Figure 6** Setting the disk location and size
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0268290676.png)
+
+7. Click *Create*.
+
+### Installing a Linux OS on the VM
+
+The procedure varies depending on the image file you use. This section uses Ubuntu 20.04 as an example to describe how to install a Linux OS on the VM.
+
+:::note
+Make sure you have obtained the ISO image file of your target OS, for example, **Ubuntu-20.04-server.iso**.
+:::
+
+Use the ISO file to install Linux for the empty VM.
+
+1. In VirtualBox Manager, select the new VM and click *Settings*.
+
+ **Figure 1** Setting the VM
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0268393798.png)
+
+2. Choose *Storage* -> *Empty*, click ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0268393752.png) in the *Attributes* area, and select the ISO image file **Ubuntu-20.04-server.iso**.
+
+ **Figure 2** Selecting the ISO file to be mounted
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0268393846.png)
+
+ **Figure 3** Mounted ISO file
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001457709502.png)
+
+3. Click *OK*.
+4. In VirtualBox Manager, select the new VM and click *Start*.
+
+ **Figure 4** Starting the VM
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0268337032.png)
+
+5. Install the OS.
+ 1. Select *English* and press *Enter*.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001514305585.png)
+
+ 2. Select *Continue without updating*.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001464149800.png)
+
+    3. Retain the default settings for the keyboard. Select *Done*.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001514311097.png)
+
+ 4. Retain the default settings for the installation base. Select *Done*.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001514670405.png)
+
+ 5. Retain the default settings for the network. Select *Done*.
+
+ The installation program will automatically identify the IP address. If the network cannot be found, the installation program can still continue and you can configure the network again after the installation is complete.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001463836772.png)
+
+ 6. Retain the default settings for the proxy. Select *Done*.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001514558429.png)
+
+ 7. Retain the default settings for the software source. Select *Done*.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001463840256.png)
+
+ 8. Retain the default settings for disk partitioning (use an entire disk and set up this disk as an LVM group). Select *Done*.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001514561077.png)
+
+ The file system information will be displayed. Check it and select *Done*.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001463841764.png)
+
+ Confirm the destructive action and select *Continue*.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001514681605.png)
+
+ 9. Configure the server name, username, and password. Select *Done*.
+
+ **Your name**: It is not a username for logging in to the server. You can consider it as server description.
+
+ **Your Server's name**: It is a unique server name on the same network. The name cannot contain uppercase letters.
+
+ **Pick a username**: It is a username for logging in to the server. If you forget it or its password, you will not be allowed to log in to the server.
+
+ **Choose a password**: It is the password for logging in to the server.
+
+ **Confirm your password**: Enter your password again.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001464161876.png)
+
+ 10. Install SSH so that you can remotely connect to the Linux server.
+
+ Select *Install OpenSSH server*. Then, press *TAB* to select *Done*.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001514587325.png)
+
+ 11. Select *Done* to start the OS installation.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001464002560.png)
+
+ 12. After the installation is complete, select *Reboot* to restart the system.
+
+ ![](/img/docs/best-practices/computing/image-management-service/en-us_image_0000001464031938.png)
+
+## Configuring the VM
+
+### Installing Drivers and Changing the Disk Identifiers to the UUID Format
+
+To ensure that the ECSs created from the image support both Xen and KVM virtualization, install Native Xen and KVM drivers and change the disk identifiers to the UUID format for the VM which is used as the image source.
+
+This section describes how to perform these operations on a Linux VM that runs Ubuntu 20.04. For other OSs, see [Optimization Process (Linux)](https://docs.otc.t-systems.com/image-management-service/umn/managing_private_images/optimizing_a_linux_private_image/optimization_process.html).
+
+1. Run the following command to open the **modules** file:
+
+ **vi /etc/initramfs-tools/modules**
+
+2. Press *i* to enter the editing mode and add the native Xen (xen-pv) and KVM (virtio) drivers to the **/etc/initramfs-tools/modules** file (the format depends on the OS requirements).
+
+ ```shell
+ [root@CTU10000xxxxx ~]#vi /etc/initramfs-tools/modules
+ ...
+ # Examples:
+ #
+ # raid1
+   # sd_mod
+ xen-blkfront
+ xen-netfront
+ virtio_blk
+ virtio_scsi
+ virtio_net
+ virtio_pci
+ virtio_ring
+ virtio
+ ```
+
+3. Press *ESC*, enter **:wq**, and press *Enter* to save the settings and exit the vi editor.
+4. Run the following command to generate initrd again:
+
+ ```shell
+ update-initramfs -u
+ ```
+
+5. Run the following commands to check whether native Xen and KVM drivers have been installed:
+
+ ```shell
+   lsinitramfs /boot/initrd.img-`uname -r` |grep xen
+
+   lsinitramfs /boot/initrd.img-`uname -r` |grep virtio
+ ```
+
+ ```shell
+ [root@ CTU10000xxxxx home]# lsinitramfs /boot/initrd.img-`uname -r` |grep xen
+ lib/modules/3.5.0-23-generic/kernel/drivers/net/ethernet/qlogic/netxen
+ lib/modules/3.5.0-23-generic/kernel/drivers/net/ethernet/qlogic/netxen/netxen_nic.ko
+ lib/modules/3.5.0-23-generic/kernel/drivers/net/xen-netback
+ lib/modules/3.5.0-23-generic/kernel/drivers/net/xen-netback/xen-netback.ko
+ lib/modules/3.5.0-23-generic/kernel/drivers/block/xen-blkback
+ lib/modules/3.5.0-23-generic/kernel/drivers/block/xen-blkback/xen-blkback.ko
+
+ [root@ CTU10000xxxxx home]# lsinitramfs /boot/initrd.img-`uname -r` |grep virtio
+ lib/modules/3.5.0-23-generic/kernel/drivers/scsi/virtio_scsi.ko
+ ```
+
+ :::note
+ If you add built-in drivers to the initrd or initramfs file, the VM will not be affected. This makes it easy to modify the drivers. However, the drivers cannot be shown by running the lsinitrd command. You can run the following commands to check whether the drivers are built-in ones in the kernel:
+
+ ```shell
+ [root@ CTU10000xxxxx home]# cat /boot/config-`uname -r` | grep CONFIG_VIRTIO | grep y
+ CONFIG_VIRTIO_BLK=y
+ CONFIG_VIRTIO_NET=y
+ CONFIG_VIRTIO=y
+ CONFIG_VIRTIO_RING=y
+ CONFIG_VIRTIO_PCI=y
+ CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+ [root@ CTU10000xxxxx home]# cat /boot/config-`uname -r` | grep CONFIG_XEN | grep y
+ CONFIG_XEN_BLKDEV_FRONTEND=y
+ CONFIG_XEN_NETDEV_FRONTEND=y
+ ```
+ :::
+
+### Changing the Disk Identifier in the GRUB Configuration File to the UUID Format
+
+Take Ubuntu 20.04 as an example. Run **blkid** to obtain the UUID of the root partition. Modify the **/boot/grub/grub.cfg** file and use the UUID of the root partition to configure the boot item. If the root partition already uses UUID, no modification is required. The procedure is as follows:
+
+1. Log in to the newly created VM as user **root**.
+2. Run the following command to query all types of mounted file systems and their device UUIDs:
+
+ ```shell
+ blkid
+ ```
+
+ The following information is displayed:
+
+ ```shell
+ /dev/xvda1: UUID="ec51d860-34bf-4374-ad46-a0c3e337fd34" TYPE="ext3"
+ /dev/xvda5: UUID="7a44a9ce-9281-4740-b95f-c8de33ae5c11" TYPE="swap"
+ ```
+
+3. Run the following command to query the **grub.cfg** file:
+
+ ```shell
+   cat /boot/grub/grub.cfg
+ ```
+
+ The following information is displayed:
+
+ ```shell
+ ......menuentry 'Ubuntu Linux, with Linux 3.13.0-24-generic' --class ubuntu --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.13.0-24-generic-advanced-ec51d860-34bf-4374-ad46-a0c3e337fd34' {
+ recordfail
+ load_video
+ gfxmode $linux_gfx_mode
+ insmod gzio
+ insmod part_msdos
+ insmod ext2
+ if [ x$feature_platform_search_hint = xy ]; then
+ search --no-floppy --fs-uuid --set=root ec51d860-34bf-4374-ad46-a0c3e337fd34
+ else
+ search --no-floppy --fs-uuid --set=root ec51d860-34bf-4374-ad46-a0c3e337fd34
+ fi
+ echo 'Loading Linux 3.13.0-24-generic ...'
+ linux /boot/vmlinuz-3.13.0-24-generic root=/dev/xvda1 ro
+ echo 'Loading initial ramdisk ...'
+ initrd /boot/initrd.img-3.13.0-24-generic
+ }
+ ```
+
+4. Check whether the **/boot/grub/grub.cfg** configuration file contains **root=/dev/xvda1** or **root=UUID=ec51d860-34bf-4374-ad46-a0c3e337fd34**.
+ * If **root=UUID=ec51d860-34bf-4374-ad46-a0c3e337fd34** is contained, the root partition is in the UUID format and no further action is required.
+ * If **root=/dev/xvda1** is contained, the root partition is represented by a device name. Go to step 5.
+
+5. Obtain the UUID of the root partition based on **root=/dev/xvda1** and information obtained by running the **blkid** command.
+6. Run the following command to open the **grub.cfg** file:
+
+ ```shell
+ vi /boot/grub/grub.cfg
+ ```
+
+7. Press *i* to enter the editing mode. Change the identifier of the root partition to the UUID format. For example, change **root=/dev/xvda1** to **root=UUID=ec51d860-34bf-4374-ad46-a0c3e337fd34**.
+8. Press *ESC*, enter **:wq**, and press *Enter* to save the settings and exit the vi editor.
+9. Run the following command to verify the change:
+
+ ```shell
+ cat /boot/grub/grub.cfg
+ ```
+
+ The change is successful if information similar to the following is displayed:
+
+ ```shell
+ ......menuentry 'Ubuntu Linux, with Linux 3.13.0-24-generic' --class ubuntu --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.13.0-24-generic-advanced-ec51d860-34bf-4374-ad46-a0c3e337fd34' {
+ recordfail
+ load_video
+ gfxmode $linux_gfx_mode
+ insmod gzio
+ insmod part_msdos
+ insmod ext2
+ if [ x$feature_platform_search_hint = xy ]; then
+ search --no-floppy --fs-uuid --set=root ec51d860-34bf-4374-ad46-a0c3e337fd34
+ else
+ search --no-floppy --fs-uuid --set=root ec51d860-34bf-4374-ad46-a0c3e337fd34
+ fi
+ echo 'Loading Linux 3.13.0-24-generic ...'
+ linux /boot/vmlinuz-3.13.0-24-generic root=UUID=ec51d860-34bf-4374-ad46-a0c3e337fd34 ro
+ echo 'Loading initial ramdisk ...'
+ initrd /boot/initrd.img-3.13.0-24-generic
+ }
+ ```
+
+### Changing the Disk Identifiers in the fstab File to the UUID Format
+
+Take Ubuntu 20.04 as an example. Run **blkid** to obtain the UUIDs of all partitions. Modify the **/etc/fstab** file and use the partition UUIDs to configure automatic partition mounting.
+
+1. Run the following command to query all types of mounted file systems and their device UUIDs:
+
+ ```shell
+ blkid
+ ```
+
+ The following information is displayed:
+
+ ```shell
+ /dev/xvda2: UUID="4eb40294-4c6f-4384-bbb6-b8795bbb1130" TYPE="xfs"
+ /dev/xvda1: UUID="2de37c6b-2648-43b4-a4f5-40162154e135" TYPE="swap"
+ ```
+
+2. Run the following command to query the **fstab** file:
+
+ ```shell
+ cat /etc/fstab
+ ```
+
+ The following information is displayed:
+
+ ```shell
+ [root@CTU1000028010 ~]# cat /etc/fstab
+ /dev/xvda2 / xfs defaults 0 0
+ /dev/xvda1 swap swap defaults 0 0
+ ```
+
+3. Check whether the disk identifiers in the **fstab** file are device names or UUIDs.
+ * If they are UUIDs, no further action is required.
+ * If they are device names, go to step 4.
+4. Run the following command to open the **fstab** file:
+
+ ```shell
+ vi /etc/fstab
+ ```
+
+5. Press *i* to enter the editing mode and change the disk identifiers to the UUID format.
+6. Press *ESC*, enter **:wq**, and press *Enter* to save the settings and exit the vi editor.
+
+### Installing Cloud-Init
+
+:::note
+For more information on cloud-init check this [link](https://docs.otc.t-systems.com/image-management-service/umn/linux_operations/installing_cloud-init.html).
+:::
+
+### Configuring Cloud-Init
+
+:::note
+For more information on cloud-init check this [link](https://docs.otc.t-systems.com/image-management-service/umn/linux_operations/configuring_cloud-init.html).
+:::
+
+### Configuring NetworkManager
+
+Linux allows you to use **NetworkManager** to automatically configure the VM network. You are advised to use NetworkManager for new OS versions.
+
+Alternatively, you can use the native network management service of the OS.
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+
+ 1. Run the following command to install NetworkManager:
+
+ ```shell
+ yum install NetworkManager
+ ```
+
+ 2. Delete **ifcfg-eth1** to **ifcfg-eth11** from the **/etc/sysconfig/network-scripts/** directory and retain only **ifcfg-eth0**.
+ 3. Run the following command to disable the network:
+
+ ```shell
+ service network stop
+ ```
+ 4. Run the following command to disable automatic startup of the network:
+
+ ```shell
+ chkconfig network off
+ ```
+
+ 5. Run the following commands to restart messagebus and NetworkManager and enable NetworkManager to start automatically at startup:
+
+ ```shell
+ service messagebus restart
+ service NetworkManager restart
+ chkconfig NetworkManager on
+ ```
+
+
+ Install NetworkManager and use it for automatic network configuration.
+ 1. Run the following command to install NetworkManager:
+
+ ```shell
+ apt-get install network-manager
+ ```
+
+ 2. Change the value of **managed** in the **/etc/NetworkManager/NetworkManager.conf** file to **true**.
+ 3. Modify **/etc/network/interfaces** and retain only **eth0**.
+ 4. Run the following commands to disable the network, restart messagebus and NetworkManager, and enable NetworkManager to start automatically at startup:
+
+ ```shell
+ service network-manager restart
+ chkconfig network-manager on
+ service networking stop
+ service messagebus restart
+ service network-manager restart
+ ```
+
+
+
+ Install NetworkManager and use it for automatic network configuration.
+
+ 1. Run the following command to install NetworkManager:
+
+ ```shell
+ apt-get install network-manager
+ ```
+
+ 2. Change the value of **managed** in the **/etc/NetworkManager/NetworkManager.conf** file to **true**.
+ 3. Modify **/etc/network/interfaces** and retain only **eth0**.
+ 4. Run the following command to disable the network:
+
+ ```shell
+ service networking stop
+ ```
+ 5. Run the following command to disable automatic startup of the network:
+
+ ```shell
+ chkconfig network off
+ ```
+ 6. Run the following commands to restart D-Bus and NetworkManager:
+
+ ```shell
+ service dbus restart
+ service network-manager restart
+ ```
+
+
+
+ Install NetworkManager and use it for automatic network configuration.
+
+ 1. Delete **ifcfg-eth1** to **ifcfg-eth11** from the **/etc/sysconfig/network-scripts/** directory and retain only **ifcfg-eth0**.
+ 2. Run the following command to install NetworkManager:
+
+ ```shell
+ zypper install NetworkManager
+ ```
+ 3. Start YaST, choose **Network Devices** in the navigation pane on the left, and select **Network Settings** in the right pane. In the **Network Setup Method** area of the **Global Options** page, change **Traditional Method with ifup** to **User Controlled with NetworkManager**.
+
+
+
+
+## Obtaining the Image File
+
+After the VM is configured, perform the following operations to generate and export a Linux image file:
+
+1. Open VirtualBox, select the VM, choose *Settings* -> *Storage*, and select **Linux.vhd**.
+2. On the right pane, view the image file location.
+3. Go to the location to obtain the generated **Linux.vhd** image file.
+
+## Registering the Image File as a Private Image
+
+Upload the image file to an OBS bucket and register it as a private image.
+
+:::important
+* Only an unencrypted image file or an image file encrypted using SSE-KMS can be uploaded to an OBS bucket.
+* When uploading an image file, you must select an OBS bucket with the storage class of Standard.
+:::
+
+1. Use OBS Browser+ to upload the image file. For details, see [OBS Browser+ Best Practices](https://docs.otc.t-systems.com/object-storage-service/tool-guide/best_practices/index.html).
+
+ For how to download OBS Browser+, see [https://docs.otc.t-systems.com/object-storage-service/tool-guide/downloading_obs_browser.html](https://docs.otc.t-systems.com/object-storage-service/tool-guide/downloading_obs_browser.html).
+
+2. Register the external image file as a private image. For details, see [Registering an Image File as a Private Image (Linux)](https://docs.otc.t-systems.com/image-management-service/umn/creating_a_private_image/creating_a_linux_system_disk_image_from_an_external_image_file/registering_an_external_image_file_as_a_private_image.html).
diff --git a/docs/best-practices/containers/cloud-container-engine/issue-an-acme-certificate-with-dns01-solver-in-cce.md b/docs/best-practices/containers/cloud-container-engine/issue-an-acme-certificate-with-dns01-solver-in-cce.md
new file mode 100644
index 000000000..6ee8866bd
--- /dev/null
+++ b/docs/best-practices/containers/cloud-container-engine/issue-an-acme-certificate-with-dns01-solver-in-cce.md
@@ -0,0 +1,242 @@
+---
+id: issue-an-acme-certificate-with-dns01-solver-in-cce
+title: Issue an ACME Certificate with DNS01 Solver in CCE
+tags: [cce, acme, lets-encrypt, certificates, cert-manager, dns-solver, cert-manager-webhook-opentelekomcloud]
+---
+
+# Issue an ACME Certificate with DNS01 Solver in CCE
+
+A DNS01 challenge is a type of challenge used in the Domain Name System (DNS) to verify ownership of a domain during the process of obtaining an SSL/TLS certificate, often through services like Let's Encrypt.
+
+When you request a certificate, the Certificate Authority (CA) needs to ensure that you have control over the domain for which you're requesting the certificate. The DNS01 challenge is one of the methods used to prove this control. Here's how it generally works:
+
+1. **Challenge Issuance**: The CA provides you with a unique token (a random string of characters) that needs to be added to your domain's DNS records.
+
+2. **DNS Record Addition**: You must create a specific DNS TXT record for your domain that includes the token provided by the CA. This record usually follows a format like `_acme-challenge.example.com` with a value corresponding to the token.
+
+3. **Verification**: Once you've added the TXT record to your domain's DNS configuration, the CA will query your domain's DNS records to look for the TXT record. If it finds the correct token, it confirms that you control the domain.
+
+4. **Certificate Issuance**: After successful verification, the CA will issue the SSL/TLS certificate.
+
+The DNS01 challenge is commonly used because it allows for domain validation without needing to serve files over HTTP, which can be advantageous in certain situations, such as when you don't have a web server configured or when you're managing multiple subdomains.
+
+One of the tools that can be employed in the context of Kubernetes, to secure certificates from a Certificate Authority (CA) via the ACME protocol using the DNS01 challenge, is [cert-manager](https://cert-manager.io). Specifically for Open Telekom Cloud, we can use an additional webhook that acts as an ACME DNS01 solver for Open Telekom Cloud's Domain Name Service, [cert-manager-webhook-opentelekomcloud](https://github.com/akyriako/cert-manager-webhook-opentelekomcloud).
+
+## Prerequisites
+
+Only prerequisite is cert-manager. If you don't have it already installed on your CCE Cluster, this can be very easily done using a Helm Chart:
+
+```shell
+helm repo add jetstack https://charts.jetstack.io
+helm repo update
+
+helm install \
+ cert-manager jetstack/cert-manager \
+ --namespace cert-manager \
+ --create-namespace \
+ --version v1.15.3 \
+ --set crds.enabled=true
+```
+
+## Installing the ACME DNS01 Solver
+
+**cert-manager-webhook-opentelekomcloud** is an ACME DNS01 solver webhook for Open Telekom Cloud DNS written in Golang, and requires **cert-manager** to be installed first.
+
+### Acquiring Access/Secret Keys
+
+In the console, go to *My Credentials* -> *Access Keys* and either pick up an existing pair or create a new one:
+
+![alt text](<../../../../static/img/docs/best-practices/containers/cloud-container-engine/Screenshot from 2024-09-07 11-33-33.png>)
+
+Export this pair as environment variables:
+
+```shell
+export OS_ACCESS_KEY={value}
+export OS_SECRET_KEY={value}
+```
+
+### Installing the Helm Chart
+
+```shell
+helm repo add cert-manager-webhook-opentelekomcloud https://akyriako.github.io/cert-manager-webhook-opentelekomcloud/
+helm repo update
+
+helm upgrade --install \
+ acme-dns cert-manager-webhook-opentelekomcloud/cert-manager-webhook-opentelekomcloud \
+ --set opentelekomcloud.accessKey=$OS_ACCESS_KEY \
+ --set opentelekomcloud.secretKey=$OS_SECRET_KEY \
+ --namespace cert-manager
+```
+
+## Installing Cluster Issuers
+
+You are going to need one `ClusterIssuer` for the *production* and one for the *staging* Let's Encrypt endpoint.
+
+:::warning
+**cert-manager** has a known bug, that prevents custom webhooks to work with an `Issuer`. For that reason you need to install your issuer as `ClusterIssuer`.
+:::
+
+### For Staging
+
+Create and deploy the following manifest:
+
+```yaml title="opentelekomcloud-letsencrypt-staging.yaml"
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: opentelekomcloud-letsencrypt-staging
+ namespace: cert-manager
+spec:
+ acme:
+ email: user@company.com
+ server: https://acme-staging-v02.api.letsencrypt.org/directory
+ privateKeySecretRef:
+ name: opentelekomcloud-letsencrypt-staging-tls-key
+ solvers:
+ - dns01:
+ webhook:
+ groupName: acme.opentelekomcloud.com
+ solverName: opentelekomcloud
+ config:
+ region: "eu-de"
+ accessKeySecretRef:
+ name: cert-manager-webhook-opentelekomcloud-creds
+ key: accessKey
+ secretKeySecretRef:
+ name: cert-manager-webhook-opentelekomcloud-creds
+ key: secretKey
+```
+
+:::note
+Replace placeholder **email** value, `user@company.com`, with the email that will be used for requesting certificates from Let's Encrypt.
+:::
+
+```shell
+kubectl apply -f opentelekomcloud-letsencrypt-staging.yaml
+```
+
+### For Production
+
+```yaml title="opentelekomcloud-letsencrypt.yaml"
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: opentelekomcloud-letsencrypt
+ namespace: cert-manager
+spec:
+ acme:
+ email: user@company.com
+ server: https://acme-v02.api.letsencrypt.org/directory
+ privateKeySecretRef:
+ name: opentelekomcloud-letsencrypt-tls-key
+ solvers:
+ - dns01:
+ webhook:
+ groupName: acme.opentelekomcloud.com
+ solverName: opentelekomcloud
+ config:
+ region: "eu-de"
+ accessKeySecretRef:
+ name: cert-manager-webhook-opentelekomcloud-creds
+ key: accessKey
+ secretKeySecretRef:
+ name: cert-manager-webhook-opentelekomcloud-creds
+ key: secretKey
+```
+
+:::note
+Replace placeholder **email** value, `user@company.com`, with the email that will be used for requesting certificates from Let's Encrypt.
+:::
+
+```shell
+kubectl apply -f opentelekomcloud-letsencrypt.yaml
+```
+
+## Requesting a Certificate
+
+Create and deploy the following manifest:
+
+```yaml title="certificate-subdomain-example-com"
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: certificate-subdomain-example-com
+spec:
+ dnsNames:
+ - '*.subdomain.example.com'
+ issuerRef:
+ kind: ClusterIssuer
+ name: opentelekomcloud-letsencrypt-staging
+ secretName: certificate-subdomain-example-com-tls
+```
+
+:::note
+Replace placeholder DNS name `*.subdomain.example.com`, with one that you own and will be used to request a certificate from Let's Encrypt.
+:::
+
+```shell
+kubectl apply -f certificate-subdomain-example-com
+```
+
+:::warning
+Using the *staging* endpoint of Let's Encrypt before moving to the *production* endpoint is a best practice. Let's Encrypt imposes rate limits on the number of certificates you can request in a given period to prevent abuse. By testing with the staging environment, you avoid hitting these limits during your development and testing phases.
+:::
+
+## Exposing a workload with Ingress
+
+Create and deploy the following manifest:
+
+```yaml title="workload-ingress.yaml"
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: workload-ingress
+ labels:
+ app: workload
+ annotations:
+ kubernetes.io/elb.class: union
+ kubernetes.io/elb.id: "{value}"
+    kubernetes.io/elb.port: "443"
+spec:
+ ingressClassName: cce
+ tls:
+ - hosts:
+ - subdomain.example.com
+ secretName: certificate-subdomain-example-com-tls
+ rules:
+ - host: subdomain.example.com
+ http:
+ paths:
+ - backend:
+ service:
+ name: workload-svc
+ port:
+ number: 80
+ path: /
+ pathType: ImplementationSpecific
+```
+
+:::note
+You need to have:
+
+- a *workload* installed in your CCE Cluster (you can experiment with **traefik/whoami**)
+- this workload exposed with a `Service`, *workload-svc*, of type `NodePort`
+- a Shared Elastic Load Balancer
+
+:::
+
+| Parameter | Value |
+| ----------------------- | ---------------------------------------------------------------------------- |
+| kubernetes.io/elb.class | `union`, if it is a Shared Elastic Load Balancer |
+| kubernetes.io/elb.id | Replace placeholder value `{value}` with the ID of the Elastic Load Balancer |
+| kubernetes.io/elb.port | 443 |
+| ingressClassName | `cce` |
+| tls.hosts[0] | Replace placeholder value `subdomain.example.com` with your own |
+| tls.secretName | Use the name of the `Secret` that was created from `Certificate` |
+| rules.host[0] | Replace placeholder value `subdomain.example.com` with your own |
+
+```shell
+kubectl apply -f workload-ingress.yaml
+```
+
+If you visit the address https://subdomain.example.com in your browser, you will notice that the endpoint is served over HTTPS and is secured by a valid certificate.
diff --git a/docs/best-practices/databases/document-database-service/how-do-replica-sets-achieve-high-availability-and-readwrite-splitting.md b/docs/best-practices/databases/document-database-service/how-do-replica-sets-achieve-high-availability-and-readwrite-splitting.md
new file mode 100644
index 000000000..7849bcef6
--- /dev/null
+++ b/docs/best-practices/databases/document-database-service/how-do-replica-sets-achieve-high-availability-and-readwrite-splitting.md
@@ -0,0 +1,84 @@
+---
+id: how-do-replica-sets-achieve-high-availability-and-readwrite-splitting
+title: How Do Replica Sets Achieve High Availability and Read/Write Splitting?
+tags: [dds, migration, mongodb]
+---
+
+# How Do Replica Sets Achieve High Availability and Read/Write Splitting?
+
+DDS replica set instances can store multiple duplicates to ensure data high availability and support the automatic switch of private IP addresses to ensure service high availability. To enhance the read and write performance of your client for connecting to the instance, you can use your client to read different data copies. You need to connect to replica set instances using HA connection addresses. You can also configure read/write splitting. Otherwise, the high availability and high read performance of replica set instances cannot be guaranteed.
+
+The primary node of a replica set instance is not fixed. If the instance settings are changed, or the primary node fails, or primary and secondary nodes are switched, a new primary node will be elected and the previous one becomes a secondary node. The following figure shows the process of a switchover.
+
+![**Figure 1** Primary/Secondary switchover](/img/docs/best-practices/databases/document-database-service/en-us_image_0000001166068694.png)
+
+## Connecting to a Replica Set Instance (HA)
+
+A DDS replica set consists of the primary, secondary, and hidden nodes. The hidden node is invisible to users. Read/Write splitting and HA can be realized only when you connect to the IP addresses and ports of the primary and secondary nodes of the replica set at the same time (in HA mode).
+
+The following describes how to use URL and Java to connect to an instance in HA mode.
+
+### Method 1: Using a URL
+
+On the *Instances* page, click the instance name. The *Basic Information* page is displayed. Choose *Connections*. Click the *Private Connection* tab and obtain the connection address of the current instance from the *Private HA Connection Address* field.
+
+![**Figure 2** Obtaining the private HA connection address](/img/docs/best-practices/databases/document-database-service/en-us_image_0000001210912526.png)
+
+Example:
+
+```shell
+mongodb://rwuser:****@192.168.0.148:8635,192.168.0.96:8635/test?authSource=admin&replicaSet=replica
+```
+
+In the preceding URL, `192.168.0.148:8635` and `192.168.0.96:8635` are the IP addresses and ports of the primary and secondary nodes, respectively. If you use this address, the connection between your client and the instance can be ensured even when a primary/standby switchover occurs. In addition, using multiple IP addresses and port numbers can enhance the read and write performance of the entire database.
+
+![**Figure 3** Data read and write process ](/img/docs/best-practices/databases/document-database-service/en-us_image_0000001211264689.png)
+
+### Method 2: Using a Java Driver
+
+Sample code:
+
+```java
+MongoClientURI connectionString = new MongoClientURI("mongodb://rwuser:****@192.168.0.148:8635,192.168.0.96:8635/test?authSource=admin&replicaSet=replica"); MongoClient client = new MongoClient(connectionString);
+MongoDatabase database = client.getDatabase("test");
+MongoCollection collection = database.getCollection("mycoll");
+```
+
+| Parameter | Description |
+| :----------------------------------- | :---------------------------------------------------------------------------------- |
+| `rwuser:****` | Username and password for starting authentication |
+| 192.168.0.148:8635,192.168.0.96:8635 | IP addresses and ports of the primary and secondary nodes in a replica set instance |
+| test | Name of the database to be connected |
+| authSource=admin | Database username for authentication |
+| replicaSet=replica | Name of the replica set instance type |
+
+**Table 1**: Parameter description
+
+## Connecting to a Replica Set Instance
+
+:::warning
+This is not recommended!
+:::
+
+Using the Connection Address:
+
+```shell
+mongodb://rwuser:****@192.168.0.148:8635/test?authSource=admin&replicaSet=replica
+```
+
+In the preceding URL, `192.168.0.148:8635` is the IP address and port number of the current primary node. If a switchover occurs or the primary node is changed, the client fails to connect to the replica set instance because the IP address and port of the newly elected primary node is unknown. As a result, the database service becomes unavailable. In addition, read and write operations can only be performed on a fixed primary node, so the read and write performance cannot be improved by adding nodes.
+
+![**Figure 4** Data read and write process](/img/docs/best-practices/databases/document-database-service/en-us_image_0000001117852888.png)
+
+## Read/Write Splitting
+
+The following HA connection address is used as an example to describe how to connect to a DDS replica set instance:
+
+```shell
+mongodb://rwuser:****@192.168.xx.xx:8635,192.168.xx.xx:8635/test?
+authSource=admin&replicaSet=replica&readPreference=secondaryPreferred
+```
+
+The database account is `rwuser`, and the database is `admin`.
+
+After the DB instance is connected, read requests are preferentially sent to the secondary node to implement read/write splitting. If the relationship between the primary and secondary nodes changes, write operations are automatically switched to the new primary node to ensure high availability of DDS.
\ No newline at end of file
diff --git a/docs/best-practices/index.md b/docs/best-practices/index.md
deleted file mode 100644
index e6db7a5dd..000000000
--- a/docs/best-practices/index.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-id: best-practices
-title: Best Practices
----
-
-# Best Practices
-
-Welcome to the Open Telekom Cloud Architecture Center Best Practices.
-Here we provide crucial guidelines for optimizing cloud-based solutions with emphasis to architectural principles that
-enhance reliability, scalability, and security. Explore our recommended strategies for resource management, such as
-efficient utilization of compute and storage resources. Gain insights into designing for high availability and fault tolerance
-to ensure robust system performance. This section serves as a valuable resource for architects and developers
-to implement cloud solutions that align with industry best practices and maximize the benefits of the public cloud
-infrastructure.
diff --git a/docs/best-practices/index.mdx b/docs/best-practices/index.mdx
new file mode 100644
index 000000000..f08808928
--- /dev/null
+++ b/docs/best-practices/index.mdx
@@ -0,0 +1,88 @@
+---
+id: best-practices
+title: Best Practices
+---
+
+import BestPractices from '@site/src/components/ServiceCallouts';
+import ApplicationServices from '@site/src/components/ServiceCallouts/ApplicationServices';
+import DataAnalysisServices from '@site/src/components/ServiceCallouts/DataAnalysisServices';
+import ComputingServices from '@site/src/components/ServiceCallouts/ComputingServices';
+import ContainerServices from '@site/src/components/ServiceCallouts/ContainerServices';
+import DatabaseServices from '@site/src/components/ServiceCallouts/DatabaseServices';
+import ManagementServices from '@site/src/components/ServiceCallouts/ManagementServices';
+import NetworkingServices from '@site/src/components/ServiceCallouts/NetworkingServices';
+import SecurityServices from '@site/src/components/ServiceCallouts/SecurityServices';
+import StorageServices from '@site/src/components/ServiceCallouts/StorageServices';
+import clsx from 'clsx';
+import Heading from '@theme/Heading';
+import styles from './styles.module.css';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Best Practices
+
+ Welcome to the Open Telekom Cloud Architecture Center Best Practices.
+ Here we provide crucial guidelines for optimizing cloud-based solutions with emphasis to architectural principles that
+ enhance reliability, scalability, and security. Explore our recommended strategies for resource management, such as
+ efficient utilization of compute and storage resources. Gain insights into designing for high availability and fault tolerance
+ to ensure robust system performance. This section serves as a valuable resource for architects and developers
+ to implement cloud solutions that align with industry best practices and maximize the benefits of the public cloud
+ infrastructure.
+
+
+
+
+
+
+ Computing
+
+
+ Networking
+
+
+ Storage
+
+
+
+
+
+
+
+
+
+ Application Services
+
+
+ Big Data & Data Analysis
+
+
+ Database Services
+
+
+ Container Services
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/best-practices/networking/virtual-private-network/connecting-multiple-on-premises-branch-networks-through-a-vpn-hub.md b/docs/best-practices/networking/virtual-private-network/connecting-multiple-on-premises-branch-networks-through-a-vpn-hub.md
new file mode 100644
index 000000000..bdb26b5ed
--- /dev/null
+++ b/docs/best-practices/networking/virtual-private-network/connecting-multiple-on-premises-branch-networks-through-a-vpn-hub.md
@@ -0,0 +1,317 @@
+---
+id: connecting-multiple-on-premises-branch-networks-through-a-vpn-hub
+title: Connecting Multiple On-Premises Branch Networks Through a VPN Hub
+tags: [vpn, hybrid, networking]
+---
+
+# Connecting Multiple On-Premises Branch Networks Through a VPN Hub
+
+To meet service requirements, enterprise A needs to implement communication between its two on-premises data centers.
+
+## Solution Design
+
+[Figure 1](#figure-1) shows the networking where the VPN service is used to connect the two on-premises data centers.
+
+
+
+![](/img/docs/best-practices/networking/virtual-private-network/connecting-multiple-on-premises-branch-networks-through-a-vpn-hub/en-us_image_0000001592878805.png)
+
+**Figure 1**: Networking diagram
+
+### Advantages
+
+* A VPN gateway on the cloud can function as a VPN hub to enable communication between on-premises branch sites. This eliminates the need to configure VPN connections between every two sites.
+* A VPN gateway provides two IP addresses to establish dual independent VPN connections with each customer gateway. If one VPN connection fails, traffic can be quickly switched to the other VPN connection, ensuring reliability.
+
+### Limitations and Constraints
+
+* The local and customer subnets of the VPN gateway cannot be the same. That is, the VPC subnet and the data center subnet to be interconnected cannot be the same.
+* The IKE policy, IPsec policy, and PSK of the VPN gateway must be the same as those of the customer gateway.
+* The local and remote interface address configurations on the VPN gateway and customer gateway are reversed.
+* The security groups associated with ECSs in the VPC permit access from and to the on-premises data center.
+
+## Planning Networks and Resources
+
+### Data Plan
+
+
+
+
+
+
+
Category
+
Item
+
Data
+
+
+
+
+
VPC
+
Subnet that needs to access the on-premises data centers
+
+ • `192.168.0.0/24`
+ • `192.168.1.0/24`
+
+
+
+
VPN gateway
+
Interconnection subnet
+
+ This subnet is used for communication between the VPN gateway and VPC. Ensure that the selected interconnection subnet has four or more assignable IP addresses.
+ `192.168.2.0/24`
+
+
+
+
HA Mode
+
**Active-active**
+
+
+
EIP
+
+ EIPs are automatically generated when you create them. By default, a VPN gateway uses two EIPs. In this example, the EIPs are as follows:
+ • Active EIP: `1.1.1.2`
+ • Active EIP 2: `2.2.2.2`
+
+
+
+
VPN connection
+
Tunnel interface address
+
+ This address is used by a VPN gateway to establish an IPsec tunnel with a customer gateway. At the two ends of the IPsec tunnel, the configured local and remote tunnel interface addresses must be reversed.
+ VPN connections set up with on-premises data center 1:
+ • VPN connection 1: `169.254.70.1/30`
+ • VPN connection 2: `169.254.71.1/30`
+ VPN connections set up with on-premises data center 2:
+ • VPN connection 3: `169.254.72.1/30`
+ • VPN connection 4: `169.254.73.1/30`
+
+
+
+
On-premises data center 1
+
Subnet that needs to access the VPC
+
`172.16.0.0/16`
+
+
+
Customer gateway in on-premises data center 1
+
Public IP address
+
+ This public IP address is assigned by a carrier. In this example, the public IP address is `1.1.1.1`.
+
+**Table 1**: Data Plan
+
+## Prerequisites
+
+* Cloud side
+ * A VPC has been created. For details about how to create a VPC, see [Creating a VPC](https://docs.otc.t-systems.com/virtual-private-cloud/umn/vpc_and_subnet/vpc/creating_a_vpc.html).
+ * Security group rules have been configured for the VPC, and ECSs can communicate with other devices on the cloud. For details about how to configure security group rules, see [Security Group Rules](https://docs.otc.t-systems.com/virtual-private-cloud/umn/access_control/security_group/managing_security_group_rules/adding_a_security_group_rule.html).
+* Data center side
+ * IPsec has been configured on the VPN devices in the two on-premises data centers. For details, see [Administrator Guide](https://docs.otc.t-systems.com/virtual-private-network/umn/administrator_guide/index.html).
+ * The remote subnets of the VPN device in on-premises data center 1 must contain the local subnet of the Open Telekom Cloud VPC and the subnet to be interconnected in on-premises data center 2. The remote subnets of the VPN device in on-premises data center 2 must contain the local subnet of the Open Telekom Cloud VPC and the subnet to be interconnected in on-premises data center 1.
+
+### Configuration
+
+Open Telekom Cloud VPNs support static routing mode, BGP routing mode, and policy-based mode. The following uses the static routing mode as an example.
+
+1. Configure a VPN gateway.
+ 1. Choose *Virtual Private Network* -> *Enterprise – VPN Gateways*, and click *Create VPN Gateway*.
+ 2. Set parameters as prompted.
+
+ [Table 1](#table-1) only describes the key parameters for creating a VPN gateway.
+
+
+
+ **Table 1** Description of VPN gateway parameters
+ | Parameter | Description | Value |
+ | ---------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------- |
+ | Name | Name of a VPN gateway. | vpngw-001 |
+ | Network Type | Select **Public network**. | Public network |
+ | Associate With | Select **VPC**. If the VPN gateway is associated with an enterprise router, select **Enterprise Router**. | VPC |
+ | VPC | Open Telekom Cloud VPC that the on-premises data centers need to access. | vpc-001(192.168.0.0/16) |
+ | Local Subnet | VPC subnets that the on-premises data centers need to access. | 192.168.0.0/24,192.168.1.0/24 |
+ | Interconnection Subnet | This subnet is used for communication between the VPN gateway and VPC. Ensure that the selected interconnection subnet has four or more assignable IP addresses. | 192.168.2.0/24 |
+ | BGP ASN | BGP AS number. | 64512 |
+ | HA Mode | Select **Active-active**. | Active-active |
+ | Active EIP | EIP 1 used by the VPN gateway to access the on-premises data center. | 1.1.1.2 |
+ | Active EIP 2 | EIP 2 used by the VPN gateway to access the on-premises data center. | 2.2.2.2 |
+
+
+2. Configure customer gateways.
+ 1. Choose *Virtual Private Network* -> *Enterprise – Customer Gateways*, and click *Create Customer Gateway*.
+ 2. Set parameters as prompted.
+
+ [Table 2](#table-2) only describes the key parameters for creating a customer gateway.
+
+
+
+ **Table 2** Description of customer gateway parameters
+
+ | Parameter | Description | Value |
+ | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+ | Name | Name of a customer gateway. | cgw-fw1 |
+ | Routing Mode | Select **Static**. | Static |
+ | Gateway IP Address | IP address used by the customer gateway in on-premises data center 1 to communicate with the Open Telekom Cloud VPN gateway. Ensure that UDP port 4500 is permitted on the customer gateway device in the on-premises data center. | 1.1.1.1 |
+
+
+
+ 3. Repeat the preceding operations to configure the customer gateway (2.2.2.1) in on-premises data center 2.
+3. Configure VPN connections between the cloud side and on-premises data center 1.
+ 1. Choose *Virtual Private Network* -> *Enterprise – VPN Connections*, and click *Create VPN Connection*.
+ 2. Set parameters for VPN connection 1 and click *Submit*.
+
+ [Table 3](#table-3) only describes the key parameters for creating a VPN connection.
+
+
+
+ **Table 3** Description of VPN connection parameters
+
+ | Parameter | Description | Value |
+ | --------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
+ | Name | Name of a VPN connection. | vpn-001 |
+ | VPN Gateway | VPN gateway for which the VPN connection is created. | vpngw-001 |
+ | Gateway IP Address | Active EIP bound to the VPN gateway. | 1.1.1.2 |
+ | VPN Type | Select **Static routing**. | Static routing |
+ | Customer Gateway | Name of a customer gateway. | cgw-fw1 |
+ | Customer Subnet | Subnet in on-premises data center 1 that needs to access the VPC on Open Telekom Cloud. A customer subnet cannot be included in any local subnet or any subnet of the VPC to which the VPN gateway is attached. Reserved VPC CIDR blocks such as 100.64.0.0/10 and 214.0.0.0/8 cannot be used as customer subnets. | 172.16.0.0/16 |
+   | Interface IP Address Assignment   | The options are **Manually specify** and **Automatically assign**. In this example, select **Manually specify**. | Manually specify |
+ | Local Tunnel Interface Address | Tunnel interface IP address configured on the VPN gateway. | 169.254.70.1 |
+ | Customer Tunnel Interface Address | Tunnel interface IP address configured on the customer gateway device. | 169.254.70.2 |
+ | Link Detection | Whether to enable route reachability detection in multi-link scenarios. When NQA is enabled, ICMP packets are sent for detection and your device needs to respond to these ICMP packets. | **NQA** enabled |
+ | PSK, Confirm PSK | The value must be the same as the PSK configured on the customer gateway device. | Test@123 |
+ | Policy Settings | The policy settings must be the same as those on the customer gateway device. | Default |
+
+
+
+ 3. Create VPN connection 2.
+
+ :::note
+ For VPN connection 2, you are advised to use the same parameter settings as VPN connection 1, except the parameters listed in the following table.
+ :::
+
+
+
+ **Table 4** Parameter settings for VPN connection 2
+
+ | Parameter | Description | Value |
+ | --------------------------------- | ------------------------------------------ | ------------ |
+ | Name | Name of a VPN connection. | vpn-002 |
+ | Gateway IP Address | Active EIP 2 bound to the VPN gateway. | 2.2.2.2 |
+ | Local Tunnel Interface Address | Tunnel IP address of the VPN gateway. | 169.254.71.1 |
+ | Customer Tunnel Interface Address | Tunnel IP address of the customer gateway. | 169.254.71.2 |
+
+4. Configure VPN connections between the cloud side and on-premises data center 2.
+ 1. Choose *Virtual Private Network* -> *Enterprise – VPN Connections*, and click *Create VPN Connection*.
+ 2. Set parameters for VPN connection 1 as prompted and click *Submit*.
+
+ [Table 5](#table-5) only describes the key parameters for creating a VPN connection.
+
+
+
+ **Table 5** Description of VPN connection parameters
+
+ | Parameter | Description | Value |
+ | --------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
+ | Name | Name of a VPN connection. | vpn-003 |
+ | VPN Gateway | VPN gateway for which the VPN connection is created. | vpngw-001 |
+ | Gateway IP Address | Active EIP bound to the VPN gateway. | 1.1.1.2 |
+ | Customer Gateway | Name of a customer gateway. | cgw-fw2 |
+ | VPN Type | Select **Static routing**. | Static routing |
+ | Customer Subnet | Subnet in on-premises data center 2 that needs to access the VPC on Open Telekom Cloud. A customer subnet cannot be included in any local subnet or any subnet of the VPC to which the VPN gateway is attached. Reserved VPC CIDR blocks such as 100.64.0.0/10 and 214.0.0.0/8 cannot be used as customer subnets. | 10.10.0.0/16 |
+   | Interface IP Address Assignment   | The options are **Manually specify** and **Automatically assign**. In this example, select **Manually specify**. | Manually specify |
+ | Local Tunnel Interface Address | Tunnel interface IP address configured on the VPN gateway. | 169.254.72.1 |
+ | Customer Tunnel Interface Address | Tunnel interface IP address configured on the customer gateway device. | 169.254.72.2 |
+ | Link Detection | Whether to enable route reachability detection in multi-link scenarios. When NQA is enabled, ICMP packets are sent for detection and your device needs to respond to these ICMP packets. | **NQA** enabled |
+ | PSK, Confirm PSK | The value must be the same as the PSK configured on the customer gateway device in on-premises data center 2. | Test@123 |
+ | Policy Settings | The policy settings must be the same as those configured on the customer gateway device in on-premises data center 2. | Default |
+
+
+
+ 3. Create VPN connection 2.
+
+ :::note
+ For VPN connection 2, you are advised to use the same parameter settings as VPN connection 1, except the parameters listed in the following table.
+ :::
+
+
+
+ **Table 6** Parameter settings for VPN connection 2
+
+ | Parameter | Description | Value |
+ | --------------------------------- | ----------------------------------------------------------------------- | ------------ |
+ | Name | Name of a VPN connection. | vpn-004 |
+ | Gateway IP Address | Active EIP 2 bound to the VPN gateway. | 2.2.2.2 |
+ | Local Tunnel Interface Address | Tunnel IP address of the VPN gateway. | 169.254.73.1 |
+ | Customer Tunnel Interface Address | Tunnel IP address of the customer gateway in on-premises data center 2. | 169.254.73.2 |
+
+
+5. Configure customer gateway devices in on-premises data centers 1 and 2.
+
+ The configuration procedures may vary according to the type of the customer gateway device. For details, see [Administrator Guide](https://docs.otc.t-systems.com/virtual-private-network/umn/administrator_guide/index.html).
+
+
+### Verification
+
+* About 5 minutes later, check the states of the VPN connections.
+
+ Choose *Virtual Private Network* -> *Enterprise – VPN Connections*. The states of the four VPN connections are all *Normal*.
+
+* Verify that servers in on-premises data center 1 and servers in on-premises data center 2 can ping each other.
+
diff --git a/docs/best-practices/storage/object-storage-service/accessing-obs-through-an-nginx-reverse-proxy.md b/docs/best-practices/storage/object-storage-service/accessing-obs-through-an-nginx-reverse-proxy.md
index 4e9e5ab55..d6941c790 100644
--- a/docs/best-practices/storage/object-storage-service/accessing-obs-through-an-nginx-reverse-proxy.md
+++ b/docs/best-practices/storage/object-storage-service/accessing-obs-through-an-nginx-reverse-proxy.md
@@ -6,8 +6,8 @@ tags: [storage, obs, reverse-proxy, nginx]
# Accessing OBS Through an NGINX Reverse Proxy
-Generally, you can access OBS using a bucket's access domain name [for
-example](https://**bucketname**.obs.eu-de.otc.t-systems.com)
+Generally, you can access OBS using a bucket's access domain name (for
+example, **https://`bucketname`.obs.eu-de.otc.t-systems.com**)
provided by OBS or using a user-defined domain name bound to an OBS
bucket.
@@ -34,11 +34,7 @@ actual domain name or IP address of OBS is hidden.
proxy](/img/docs/best-practices/storage/object-storage-service/en-us_image_0273872842.png)
## Prerequisites
-
-- You have known the region and access domain name of the bucket. For
- example, the access domain name of a bucket in the eu-de region is
- `nginx-obs.obs.eu-de.otc.t-systems.com`. To obtain the
- information, see [Querying Basic Information of a
+- You know the region and access domain name of the bucket. For example, the access domain name of a bucket named `nginx-obs` in the **eu-de** region is `nginx-obs.obs.eu-de.otc.t-systems.com`. To obtain the information, see [Querying Basic Information of a
Bucket](https://docs.otc.t-systems.com/object-storage-service/umn/obs_browser_operation_guide/managing_buckets/viewing_basic_information_of_a_bucket.html).
- You have a Linux ECS **in the same region**. CentOS is used here as an
example. For details, see [Creating an
@@ -110,9 +106,12 @@ b. Press the *i* key to go to the edit mode and modify the
| Parameter | Description |
| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| server_name | IP address that provides the reverse proxy service. It is the fixed IP address that is exposed to end users for access. Enter the EIP of the ECS where the NGINX reverse proxy service is deployed. |
- | proxy_pass | IP address of the proxied server. Enter the OBS bucket access domain name required in [Prerequisites](#prerequisites). The domain name must start with http:// or https://.
Example: [https://nginx-obs.obs.eu-de.otc.t-systems.com](https://nginx-obs.obs.eu-de.otc.t-systems.com) **Note**: When you use an API, SDK, or obsutil for calling, set this parameter to the region domain name. The following is an example: `obs.eu-de.otc.t-systems.com` |
- | proxy_buffering | Whether to enable the proxy buffer. The value can be `on` or `off`. If this parameter is set to on, Nginx stores the response returned by the backend in a buffer and then sends the data to the client. If this parameter is set to off, Nginx sends the response to the client as soon as it receives the data from the backend. Default value: `on`
Example: `proxy_buffering off` |
+ | proxy_pass | IP address of the proxied server. Enter the OBS bucket access domain name required in [Prerequisites](#prerequisites). The domain name must start with http:// or https://.
Example: [https://nginx-obs.obs.eu-de.otc.t-systems.com](https://nginx-obs.obs.eu-de.otc.t-systems.com)|
+ | proxy_buffering | Whether to enable the proxy buffer. The value can be `on` or `off`. If this parameter is set to on, Nginx stores the response returned by the backend in a buffer and then sends the data to the client. If this parameter is set to off, Nginx sends the response to the client as soon as it receives the data from the backend. Default value: `on`
Example: `proxy_buffering off` |
+:::note
+When you use an API, SDK, or obsutil for calling, set **proxy_pass** to the region domain name. The following is an example: `obs.eu-de.otc.t-systems.com`.
+:::
c. Press the *ESC* key and enter *:wq* to save the
configuration and exit.
@@ -157,11 +156,107 @@ c. In the navigation pane, choose *Permissions* -> *Bucket
d. Click *Create*.
-e. Choose a policy configuration method you like. *Visual Editor*
- is used here.
+e. Choose a policy configuration method you like. *Visual Editor* is used here.
+
+![*Figure 3* ](/img/docs/best-practices/storage/object-storage-service/policy-visual-editor.png)
f. Configure the following parameters.
+
+
+ Table 2
+ Bucket policy parameters
+
+
+
+
Parameter
+
+
Description
+
+
+
+
+
Policy Name
+
+
Enter a policy name.
+
+
+
Policy content
+
Effect
+
Select Allow.
+
+
+
+
Principal
+
+
+
+   To select **All accounts**, enter *.
+
+
+
+
+
+
Resources
+
+
+
+ Method 1:
+
+
Select Entire bucket (including the objects in it).
+
+
+
+ Method 2:
+
+
Select Current bucket and Specified objects.
+
Set the resource path to * to indicate all objects in the bucket.
+
+
+
+
+
+
+
+
Actions
+
+
+
Choose Customize.
+
Select Get* and List*.
+
+
+
+
+
+
Conditions (Optional)
+
+
+
Key: Select SourceIp.
+
Condition Operator: Select IpAddress
+
+ Value:
+
+
+
If the ECS uses a public DNS, the value is as follows:
+
Elastic IP address of the ECS
+
+
+
+   If the ECS uses an Open Telekom Cloud private DNS, the value is as follows:
+
100.64.0.0/10,214.0.0.0/7,Private IP address of the ECS