From ea423ac748dcfe061b81ae0bc59859b9363d2554 Mon Sep 17 00:00:00 2001 From: gh-actions Date: Thu, 3 Aug 2023 17:26:18 +0000 Subject: [PATCH] Deploy website - based on 75ab94d820869d0af3102a5b9189f4126c4ed007 --- 404.html | 4 ++-- assets/js/{3d9c95a4.3ed583a5.js => 3d9c95a4.aeee8a2e.js} | 2 +- assets/js/{81482700.b74ab000.js => 81482700.7bacc91a.js} | 2 +- assets/js/{8d193b98.04a07b21.js => 8d193b98.20c777c8.js} | 2 +- assets/js/{9ed00105.77f97e25.js => 9ed00105.8df2f6a6.js} | 2 +- assets/js/{b9207088.0df17469.js => b9207088.f80f3130.js} | 2 +- assets/js/{d12053bc.2505c139.js => d12053bc.d294519a.js} | 2 +- assets/js/{fd0c4088.ff5067dd.js => fd0c4088.b901191e.js} | 2 +- assets/js/{fd0f2061.fd5dd73e.js => fd0f2061.4effe9e9.js} | 2 +- assets/js/runtime~main.1ace37a5.js | 1 - assets/js/runtime~main.d1ce8223.js | 1 + configuration.html | 6 +++--- contributing.html | 6 +++--- cookbook.html | 6 +++--- index.html | 6 +++--- key-migration.html | 6 +++--- kubectl-plugin.html | 6 +++--- rolling-update.html | 6 +++--- scaling.html | 6 +++--- 19 files changed, 35 insertions(+), 35 deletions(-) rename assets/js/{3d9c95a4.3ed583a5.js => 3d9c95a4.aeee8a2e.js} (99%) rename assets/js/{81482700.b74ab000.js => 81482700.7bacc91a.js} (99%) rename assets/js/{8d193b98.04a07b21.js => 8d193b98.20c777c8.js} (99%) rename assets/js/{9ed00105.77f97e25.js => 9ed00105.8df2f6a6.js} (99%) rename assets/js/{b9207088.0df17469.js => b9207088.f80f3130.js} (99%) rename assets/js/{d12053bc.2505c139.js => d12053bc.d294519a.js} (99%) rename assets/js/{fd0c4088.ff5067dd.js => fd0c4088.b901191e.js} (99%) rename assets/js/{fd0f2061.fd5dd73e.js => fd0f2061.4effe9e9.js} (99%) delete mode 100644 assets/js/runtime~main.1ace37a5.js create mode 100644 assets/js/runtime~main.d1ce8223.js diff --git a/404.html b/404.html index 95cd15d6..7aebc947 100644 --- a/404.html +++ b/404.html @@ -4,13 +4,13 @@ Page Not Found | Operator for Redis Cluster - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/3d9c95a4.3ed583a5.js b/assets/js/3d9c95a4.aeee8a2e.js similarity index 99% rename from assets/js/3d9c95a4.3ed583a5.js rename to assets/js/3d9c95a4.aeee8a2e.js index ac642537..a9c91d6d 100644 --- a/assets/js/3d9c95a4.3ed583a5.js +++ b/assets/js/3d9c95a4.aeee8a2e.js @@ -1 +1 @@ -"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[125],{3905:(e,t,r)=>{r.d(t,{Zo:()=>d,kt:()=>m});var o=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function i(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var s=o.createContext({}),p=function(e){var t=o.useContext(s),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},d=function(e){var t=p(e.components);return o.createElement(s.Provider,{value:t},e.children)},u="mdxType",c={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},h=o.forwardRef((function(e,t){var r=e.components,n=e.mdxType,a=e.originalType,s=e.parentName,d=l(e,["components","mdxType","originalType","parentName"]),u=p(r),h=n,m=u["".concat(s,".").concat(h)]||u[h]||c[h]||a;return r?o.createElement(m,i(i({ref:t},d),{},{components:r})):o.createElement(m,i({ref:t},d))}));function m(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var a=r.length,i=new Array(a);i[0]=h;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[u]="string"==typeof e?e:n,i[1]=l;for(var p=2;p{r.r(t),r.d(t,{assets:()=>s,contentTitle:()=>i,default:()=>c,frontMatter:()=>a,metadata:()=>l,toc:()=>p});var o=r(7462),n=(r(7294),r(3905));const a={title:"Home",hide_title:!0,slug:"/"},i=void 0,l={unversionedId:"home",id:"home",title:"Home",description:"logo",source:"@site/docs/home.md",sourceDirName:".",slug:"/",permalink:"/operator-for-redis-cluster/",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/home.md",tags:[],version:"current",lastUpdatedAt:1691078446,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Home",hide_title:!0,slug:"/"},sidebar:"docs",next:{title:"Cookbook",permalink:"/operator-for-redis-cluster/cookbook"}},s={},p=[{value:"Project status: alpha",id:"project-status-alpha",level:2},{value:"Overview",id:"overview",level:2},{value:"Deployment",id:"deployment",level:2},{value:"Environment requirements",id:"environment-requirements",level:3},{value:"Helm chart deployment",id:"helm-chart-deployment",level:3},{value:"Create the RedisCluster",id:"create-the-rediscluster",level:4},{value:"Install kubectl redis-cluster plugin",id:"install-kubectl-redis-cluster-plugin",level:3},{value:"Deployment from source code",id:"deployment-from-source-code",level:3},{value:"Build container images",id:"build-container-images",level:4},{value:"How to Release the Redis Operator",id:"how-to-release-the-redis-operator",level:3},{value:"How to Upgrade Redis Client Version",id:"how-to-upgrade-redis-client-version",level:3}],d={toc:p},u="wrapper";function 
c(e){let{components:t,...a}=e;return(0,n.kt)(u,(0,o.Z)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("p",null,(0,n.kt)("img",{alt:"logo",src:r(8795).Z,width:"660",height:"204"})),(0,n.kt)("h2",{id:"project-status-alpha"},"Project status: alpha"),(0,n.kt)("p",null,"This is an ongoing project."),(0,n.kt)("p",null,"The goal of this project is to simplify the deployment and management of a ",(0,n.kt)("a",{parentName:"p",href:"https://redis.io/topics/cluster-tutorial"},"Redis cluster")," in a ",(0,n.kt)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes")," environment. It started internally at Amadeus in 2016, where it was initially designed to run on ",(0,n.kt)("a",{parentName:"p",href:"https://www.openshift.com/"},"Openshift"),". This is the third version of the Redis operator, which leverages the ",(0,n.kt)("a",{parentName:"p",href:"https://sdk.operatorframework.io/"},"Operator SDK")," framework for operators."),(0,n.kt)("h2",{id:"overview"},"Overview"),(0,n.kt)("p",null,"This project contains two Helm charts, namely ",(0,n.kt)("inlineCode",{parentName:"p"},"operator-for-redis")," and ",(0,n.kt)("inlineCode",{parentName:"p"},"node-for-redis"),". The first chart deploys the Redis operator, ",(0,n.kt)("inlineCode",{parentName:"p"},"RedisCluster")," Custom Resource Definition (CRD), and various other k8s resources. The second chart deploys the ",(0,n.kt)("inlineCode",{parentName:"p"},"RedisCluster")," resource and various other k8s resources. Each node in the Redis cluster runs in its own Pod. Upon startup, each node joins the cluster as a primary node with no slots. See the cluster representation in the diagram below:"),(0,n.kt)("p",null,(0,n.kt)("img",{alt:"Initial state",src:r(3).Z,width:"729",height:"394"})),(0,n.kt)("p",null,"At this point, your Redis process is running and each node is aware of each other, but only one primary has all the slots. In order to properly configure each node in the cluster, we introduce the ",(0,n.kt)("inlineCode",{parentName:"p"},"Operator for Redis Cluster"),"."),(0,n.kt)("p",null,"The operator watches the ",(0,n.kt)("inlineCode",{parentName:"p"},"RedisCluster")," CR that stores cluster configuration: number of primaries, replication factor (number of replicas per primary), and the pod template. Then the operator tries to apply this configuration to the set of Redis server processes. If the number of Redis servers doesn't match the provided configuration, the manager scales the number of pods to obtain the proper number of Redis nodes. The operator continuously reconciles the state of the cluster with the configuration stored in the ",(0,n.kt)("inlineCode",{parentName:"p"},"RedisCluster")," CR until they match. To understand how the reconciliation loop works, see the ",(0,n.kt)("a",{parentName:"p",href:"https://sdk.operatorframework.io/docs/building-operators/golang/tutorial/#reconcile-loop"},"Operator SDK docs"),"."),(0,n.kt)("h2",{id:"deployment"},"Deployment"),(0,n.kt)("p",null,"You can follow the ",(0,n.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/cookbook"},"cookbook")," to deploy the operator and Redis cluster with Minikube."),(0,n.kt)("h3",{id:"environment-requirements"},"Environment requirements"),(0,n.kt)("p",null,"The project may have started on Openshift, but it now supports Kubernetes as well. 
Please check the minimum environment version in the table below."),(0,n.kt)("table",null,(0,n.kt)("thead",{parentName:"table"},(0,n.kt)("tr",{parentName:"thead"},(0,n.kt)("th",{parentName:"tr",align:null},"Environment"),(0,n.kt)("th",{parentName:"tr",align:null},"Version"))),(0,n.kt)("tbody",{parentName:"table"},(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"Openshift"),(0,n.kt)("td",{parentName:"tr",align:null},">= 3.7")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"Kubernetes"),(0,n.kt)("td",{parentName:"tr",align:null},">= 1.7")))),(0,n.kt)("h3",{id:"helm-chart-deployment"},"Helm chart deployment"),(0,n.kt)("p",null,"You can find two Helm charts in the ",(0,n.kt)("inlineCode",{parentName:"p"},"charts")," folder:"),(0,n.kt)("ul",null,(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("inlineCode",{parentName:"li"},"operator-for-redis")," used to deploy the operator in your Kubernetes cluster."),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("inlineCode",{parentName:"li"},"node-for-redis")," used to create the ",(0,n.kt)("inlineCode",{parentName:"li"},"RedisCluster")," CR that will be managed by the operator.")),(0,n.kt)("p",null,"Operator deployment example:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"helm install operator-for-redis charts/operator-for-redis\nNAME: operator-for-redis\nLAST DEPLOYED: Fri Aug 13 11:48:29 2021\nNAMESPACE: default\nSTATUS: deployed\n\nRESOURCES:\n==> v1/Deployment\nNAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE\noperator-for-redis 1 1 1 1 10s\n")),(0,n.kt)("h4",{id:"create-the-rediscluster"},"Create the RedisCluster"),(0,n.kt)("p",null,"You can configure the topology of the cluster by editing the provided ",(0,n.kt)("inlineCode",{parentName:"p"},"values.yaml"),", using an override file, and/or setting each value with ",(0,n.kt)("inlineCode",{parentName:"p"},"--set")," when you execute ",(0,n.kt)("inlineCode",{parentName:"p"},"helm install"),"."),(0,n.kt)("p",null,"Redis cluster deployment example:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"helm install node-for-redis charts/node-for-redis\nNAME: node-for-redis\nLAST DEPLOYED: Fri Aug 13 11:48:29 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nTEST SUITE: None\n")),(0,n.kt)("blockquote",null,(0,n.kt)("p",{parentName:"blockquote"},(0,n.kt)("strong",{parentName:"p"},"! Warning !"),', if you want to use the docker images corresponding to the level of code present in the "main" branch. You need to set the image tag when you instantiate the node-for-redis chart and the operator-for-redis-cluster chart. 
The "latest" tag is corresponding to the last validated release.')),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"helm install node-for-redis charts/node-for-redis --set image.tag=main-$COMMIT-dev\n")),(0,n.kt)("h3",{id:"install-kubectl-redis-cluster-plugin"},"Install kubectl redis-cluster plugin"),(0,n.kt)("p",null,"Docs available ",(0,n.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/kubectl-plugin"},"here"),"."),(0,n.kt)("h3",{id:"deployment-from-source-code"},"Deployment from source code"),(0,n.kt)("h4",{id:"build-container-images"},"Build container images"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"cd $GOPATH/src/github.com/IBM/operator-for-redis-cluster\nmake container\n")),(0,n.kt)("p",null,'you can define the docker images tag by adding the variable "TAG"'),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"make TAG= container\n")),(0,n.kt)("h3",{id:"how-to-release-the-redis-operator"},"How to Release the Redis Operator"),(0,n.kt)("blockquote",null,(0,n.kt)("p",{parentName:"blockquote"},"Do the following in ",(0,n.kt)("inlineCode",{parentName:"p"},"main")," branch:"),(0,n.kt)("ol",{parentName:"blockquote"},(0,n.kt)("li",{parentName:"ol"},"Create a tag on commit"),(0,n.kt)("li",{parentName:"ol"},"Push the commit and tag"),(0,n.kt)("li",{parentName:"ol"},"Github actions automation will build and push docker images and helm charts with release version")),(0,n.kt)("p",{parentName:"blockquote"},"NOTE: If you need to test the build prior to the above steps, you can run: ",(0,n.kt)("inlineCode",{parentName:"p"},"make build")," and resolve any issues.")),(0,n.kt)("h3",{id:"how-to-upgrade-redis-client-version"},"How to Upgrade Redis Client Version"),(0,n.kt)("p",null,"To upgrade your Redis client version, you will need to update the ",(0,n.kt)("inlineCode",{parentName:"p"},"REDIS_VERSION")," variable in both the Dockerfile for Redis node and the Github release workflow. 
Please note that upgrading the Redis client version may impact functionality because the operator depends on the ",(0,n.kt)("a",{parentName:"p",href:"https://github.com/mediocregopher/radix"},"radix library")," for executing Redis commands."))}c.isMDXComponent=!0},8795:(e,t,r)=>{r.d(t,{Z:()=>o});const o=r.p+"assets/images/logo-34d5aecd76e632e1a7d7bc9e9d9d7c67.png"},3:(e,t,r)=>{r.d(t,{Z:()=>o});const o=r.p+"assets/images/overview_1-87d24b48af2c162cd54641cd2424229d.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[125],{3905:(e,t,r)=>{r.d(t,{Zo:()=>d,kt:()=>m});var o=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function i(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var s=o.createContext({}),p=function(e){var t=o.useContext(s),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},d=function(e){var t=p(e.components);return o.createElement(s.Provider,{value:t},e.children)},u="mdxType",c={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},h=o.forwardRef((function(e,t){var r=e.components,n=e.mdxType,a=e.originalType,s=e.parentName,d=l(e,["components","mdxType","originalType","parentName"]),u=p(r),h=n,m=u["".concat(s,".").concat(h)]||u[h]||c[h]||a;return r?o.createElement(m,i(i({ref:t},d),{},{components:r})):o.createElement(m,i({ref:t},d))}));function m(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var a=r.length,i=new Array(a);i[0]=h;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[u]="string"==typeof e?e:n,i[1]=l;for(var p=2;p{r.r(t),r.d(t,{assets:()=>s,contentTitle:()=>i,default:()=>c,frontMatter:()=>a,metadata:()=>l,toc:()=>p});var o=r(7462),n=(r(7294),r(3905));const a={title:"Home",hide_title:!0,slug:"/"},i=void 0,l={unversionedId:"home",id:"home",title:"Home",description:"logo",source:"@site/docs/home.md",sourceDirName:".",slug:"/",permalink:"/operator-for-redis-cluster/",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/home.md",tags:[],version:"current",lastUpdatedAt:1691083274,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Home",hide_title:!0,slug:"/"},sidebar:"docs",next:{title:"Cookbook",permalink:"/operator-for-redis-cluster/cookbook"}},s={},p=[{value:"Project status: alpha",id:"project-status-alpha",level:2},{value:"Overview",id:"overview",level:2},{value:"Deployment",id:"deployment",level:2},{value:"Environment requirements",id:"environment-requirements",level:3},{value:"Helm chart deployment",id:"helm-chart-deployment",level:3},{value:"Create the RedisCluster",id:"create-the-rediscluster",level:4},{value:"Install kubectl redis-cluster plugin",id:"install-kubectl-redis-cluster-plugin",level:3},{value:"Deployment from source code",id:"deployment-from-source-code",level:3},{value:"Build container images",id:"build-container-images",level:4},{value:"How to Release the Redis Operator",id:"how-to-release-the-redis-operator",level:3},{value:"How to Upgrade Redis Client 
Version",id:"how-to-upgrade-redis-client-version",level:3}],d={toc:p},u="wrapper";function c(e){let{components:t,...a}=e;return(0,n.kt)(u,(0,o.Z)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("p",null,(0,n.kt)("img",{alt:"logo",src:r(8795).Z,width:"660",height:"204"})),(0,n.kt)("h2",{id:"project-status-alpha"},"Project status: alpha"),(0,n.kt)("p",null,"This is an ongoing project."),(0,n.kt)("p",null,"The goal of this project is to simplify the deployment and management of a ",(0,n.kt)("a",{parentName:"p",href:"https://redis.io/topics/cluster-tutorial"},"Redis cluster")," in a ",(0,n.kt)("a",{parentName:"p",href:"https://kubernetes.io/"},"Kubernetes")," environment. It started internally at Amadeus in 2016, where it was initially designed to run on ",(0,n.kt)("a",{parentName:"p",href:"https://www.openshift.com/"},"Openshift"),". This is the third version of the Redis operator, which leverages the ",(0,n.kt)("a",{parentName:"p",href:"https://sdk.operatorframework.io/"},"Operator SDK")," framework for operators."),(0,n.kt)("h2",{id:"overview"},"Overview"),(0,n.kt)("p",null,"This project contains two Helm charts, namely ",(0,n.kt)("inlineCode",{parentName:"p"},"operator-for-redis")," and ",(0,n.kt)("inlineCode",{parentName:"p"},"node-for-redis"),". The first chart deploys the Redis operator, ",(0,n.kt)("inlineCode",{parentName:"p"},"RedisCluster")," Custom Resource Definition (CRD), and various other k8s resources. The second chart deploys the ",(0,n.kt)("inlineCode",{parentName:"p"},"RedisCluster")," resource and various other k8s resources. Each node in the Redis cluster runs in its own Pod. Upon startup, each node joins the cluster as a primary node with no slots. See the cluster representation in the diagram below:"),(0,n.kt)("p",null,(0,n.kt)("img",{alt:"Initial state",src:r(3).Z,width:"729",height:"394"})),(0,n.kt)("p",null,"At this point, your Redis process is running and each node is aware of each other, but only one primary has all the slots. In order to properly configure each node in the cluster, we introduce the ",(0,n.kt)("inlineCode",{parentName:"p"},"Operator for Redis Cluster"),"."),(0,n.kt)("p",null,"The operator watches the ",(0,n.kt)("inlineCode",{parentName:"p"},"RedisCluster")," CR that stores cluster configuration: number of primaries, replication factor (number of replicas per primary), and the pod template. Then the operator tries to apply this configuration to the set of Redis server processes. If the number of Redis servers doesn't match the provided configuration, the manager scales the number of pods to obtain the proper number of Redis nodes. The operator continuously reconciles the state of the cluster with the configuration stored in the ",(0,n.kt)("inlineCode",{parentName:"p"},"RedisCluster")," CR until they match. To understand how the reconciliation loop works, see the ",(0,n.kt)("a",{parentName:"p",href:"https://sdk.operatorframework.io/docs/building-operators/golang/tutorial/#reconcile-loop"},"Operator SDK docs"),"."),(0,n.kt)("h2",{id:"deployment"},"Deployment"),(0,n.kt)("p",null,"You can follow the ",(0,n.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/cookbook"},"cookbook")," to deploy the operator and Redis cluster with Minikube."),(0,n.kt)("h3",{id:"environment-requirements"},"Environment requirements"),(0,n.kt)("p",null,"The project may have started on Openshift, but it now supports Kubernetes as well. 
Please check the minimum environment version in the table below."),(0,n.kt)("table",null,(0,n.kt)("thead",{parentName:"table"},(0,n.kt)("tr",{parentName:"thead"},(0,n.kt)("th",{parentName:"tr",align:null},"Environment"),(0,n.kt)("th",{parentName:"tr",align:null},"Version"))),(0,n.kt)("tbody",{parentName:"table"},(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"Openshift"),(0,n.kt)("td",{parentName:"tr",align:null},">= 3.7")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"Kubernetes"),(0,n.kt)("td",{parentName:"tr",align:null},">= 1.7")))),(0,n.kt)("h3",{id:"helm-chart-deployment"},"Helm chart deployment"),(0,n.kt)("p",null,"You can find two Helm charts in the ",(0,n.kt)("inlineCode",{parentName:"p"},"charts")," folder:"),(0,n.kt)("ul",null,(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("inlineCode",{parentName:"li"},"operator-for-redis")," used to deploy the operator in your Kubernetes cluster."),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("inlineCode",{parentName:"li"},"node-for-redis")," used to create the ",(0,n.kt)("inlineCode",{parentName:"li"},"RedisCluster")," CR that will be managed by the operator.")),(0,n.kt)("p",null,"Operator deployment example:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"helm install operator-for-redis charts/operator-for-redis\nNAME: operator-for-redis\nLAST DEPLOYED: Fri Aug 13 11:48:29 2021\nNAMESPACE: default\nSTATUS: deployed\n\nRESOURCES:\n==> v1/Deployment\nNAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE\noperator-for-redis 1 1 1 1 10s\n")),(0,n.kt)("h4",{id:"create-the-rediscluster"},"Create the RedisCluster"),(0,n.kt)("p",null,"You can configure the topology of the cluster by editing the provided ",(0,n.kt)("inlineCode",{parentName:"p"},"values.yaml"),", using an override file, and/or setting each value with ",(0,n.kt)("inlineCode",{parentName:"p"},"--set")," when you execute ",(0,n.kt)("inlineCode",{parentName:"p"},"helm install"),"."),(0,n.kt)("p",null,"Redis cluster deployment example:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"helm install node-for-redis charts/node-for-redis\nNAME: node-for-redis\nLAST DEPLOYED: Fri Aug 13 11:48:29 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nTEST SUITE: None\n")),(0,n.kt)("blockquote",null,(0,n.kt)("p",{parentName:"blockquote"},(0,n.kt)("strong",{parentName:"p"},"! Warning !"),', if you want to use the docker images corresponding to the level of code present in the "main" branch. You need to set the image tag when you instantiate the node-for-redis chart and the operator-for-redis-cluster chart. 
The "latest" tag is corresponding to the last validated release.')),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"helm install node-for-redis charts/node-for-redis --set image.tag=main-$COMMIT-dev\n")),(0,n.kt)("h3",{id:"install-kubectl-redis-cluster-plugin"},"Install kubectl redis-cluster plugin"),(0,n.kt)("p",null,"Docs available ",(0,n.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/kubectl-plugin"},"here"),"."),(0,n.kt)("h3",{id:"deployment-from-source-code"},"Deployment from source code"),(0,n.kt)("h4",{id:"build-container-images"},"Build container images"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"cd $GOPATH/src/github.com/IBM/operator-for-redis-cluster\nmake container\n")),(0,n.kt)("p",null,'you can define the docker images tag by adding the variable "TAG"'),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"make TAG= container\n")),(0,n.kt)("h3",{id:"how-to-release-the-redis-operator"},"How to Release the Redis Operator"),(0,n.kt)("blockquote",null,(0,n.kt)("p",{parentName:"blockquote"},"Do the following in ",(0,n.kt)("inlineCode",{parentName:"p"},"main")," branch:"),(0,n.kt)("ol",{parentName:"blockquote"},(0,n.kt)("li",{parentName:"ol"},"Create a tag on commit"),(0,n.kt)("li",{parentName:"ol"},"Push the commit and tag"),(0,n.kt)("li",{parentName:"ol"},"Github actions automation will build and push docker images and helm charts with release version")),(0,n.kt)("p",{parentName:"blockquote"},"NOTE: If you need to test the build prior to the above steps, you can run: ",(0,n.kt)("inlineCode",{parentName:"p"},"make build")," and resolve any issues.")),(0,n.kt)("h3",{id:"how-to-upgrade-redis-client-version"},"How to Upgrade Redis Client Version"),(0,n.kt)("p",null,"To upgrade your Redis client version, you will need to update the ",(0,n.kt)("inlineCode",{parentName:"p"},"REDIS_VERSION")," variable in both the Dockerfile for Redis node and the Github release workflow. 
Please note that upgrading the Redis client version may impact functionality because the operator depends on the ",(0,n.kt)("a",{parentName:"p",href:"https://github.com/mediocregopher/radix"},"radix library")," for executing Redis commands."))}c.isMDXComponent=!0},8795:(e,t,r)=>{r.d(t,{Z:()=>o});const o=r.p+"assets/images/logo-34d5aecd76e632e1a7d7bc9e9d9d7c67.png"},3:(e,t,r)=>{r.d(t,{Z:()=>o});const o=r.p+"assets/images/overview_1-87d24b48af2c162cd54641cd2424229d.png"}}]); \ No newline at end of file diff --git a/assets/js/81482700.b74ab000.js b/assets/js/81482700.7bacc91a.js similarity index 99% rename from assets/js/81482700.b74ab000.js rename to assets/js/81482700.7bacc91a.js index bd24899d..e1700d24 100644 --- a/assets/js/81482700.b74ab000.js +++ b/assets/js/81482700.7bacc91a.js @@ -1 +1 @@ -"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[350],{3905:(e,r,t)=>{t.d(r,{Zo:()=>p,kt:()=>h});var n=t(7294);function a(e,r,t){return r in e?Object.defineProperty(e,r,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[r]=t,e}function i(e,r){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);r&&(n=n.filter((function(r){return Object.getOwnPropertyDescriptor(e,r).enumerable}))),t.push.apply(t,n)}return t}function l(e){for(var r=1;r=0||(a[t]=e[t]);return a}(e,r);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(a[t]=e[t])}return a}var s=n.createContext({}),c=function(e){var r=n.useContext(s),t=r;return e&&(t="function"==typeof e?e(r):l(l({},r),e)),t},p=function(e){var r=c(e.components);return n.createElement(s.Provider,{value:r},e.children)},u="mdxType",d={inlineCode:"code",wrapper:function(e){var r=e.children;return n.createElement(n.Fragment,{},r)}},m=n.forwardRef((function(e,r){var t=e.components,a=e.mdxType,i=e.originalType,s=e.parentName,p=o(e,["components","mdxType","originalType","parentName"]),u=c(t),m=a,h=u["".concat(s,".").concat(m)]||u[m]||d[m]||i;return t?n.createElement(h,l(l({ref:r},p),{},{components:t})):n.createElement(h,l({ref:r},p))}));function h(e,r){var t=arguments,a=r&&r.mdxType;if("string"==typeof e||a){var i=t.length,l=new Array(i);l[0]=m;var o={};for(var s in r)hasOwnProperty.call(r,s)&&(o[s]=r[s]);o.originalType=e,o[u]="string"==typeof e?e:a,l[1]=o;for(var c=2;c{t.r(r),t.d(r,{assets:()=>s,contentTitle:()=>l,default:()=>d,frontMatter:()=>i,metadata:()=>o,toc:()=>c});var n=t(7462),a=(t(7294),t(3905));const i={title:"Scaling Operations",slug:"/scaling"},l="Scaling Operations",o={unversionedId:"scaling",id:"scaling",title:"Scaling Operations",description:"Overview",source:"@site/docs/scaling.md",sourceDirName:".",slug:"/scaling",permalink:"/operator-for-redis-cluster/scaling",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/scaling.md",tags:[],version:"current",lastUpdatedAt:1691078446,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Scaling Operations",slug:"/scaling"},sidebar:"docs",previous:{title:"Redis Server Configuration",permalink:"/operator-for-redis-cluster/configuration"},next:{title:"Rolling Update Procedure",permalink:"/operator-for-redis-cluster/rolling-update"}},s={},c=[{value:"Overview",id:"overview",level:2},{value:"Impact of scaling",id:"impact-of-scaling",level:2},{value:"Resource Requirements",id:"resource-requirements",level:3},{value:"Scaling primaries",id:"scaling-primaries",level:2},{value:"Scaling 
up",id:"scaling-up",level:3},{value:"Example",id:"example",level:4},{value:"Scaling down",id:"scaling-down",level:3},{value:"Example",id:"example-1",level:4},{value:"Scaling replication factor",id:"scaling-replication-factor",level:2},{value:"Scaling up",id:"scaling-up-1",level:3},{value:"Example",id:"example-2",level:4},{value:"Scaling down",id:"scaling-down-1",level:3},{value:"Example",id:"example-3",level:4},{value:"Scaling primaries and replication factor",id:"scaling-primaries-and-replication-factor",level:2},{value:"Example 1",id:"example-1-1",level:4},{value:"Example 2",id:"example-2-1",level:4}],p={toc:c},u="wrapper";function d(e){let{components:r,...t}=e;return(0,a.kt)(u,(0,n.Z)({},p,t,{components:r,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"scaling-operations"},"Scaling Operations"),(0,a.kt)("h2",{id:"overview"},"Overview"),(0,a.kt)("p",null,"There are many reasons why you would want to scale the number of Redis nodes in your cluster. A few of the most common reasons are: "),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},"Memory pressure - the nodes in your cluster are close to full capacity (or are at fully capacity and evictions are causing the backend to take more traffic than desired)",(0,a.kt)("ul",{parentName:"li"},(0,a.kt)("li",{parentName:"ul"},"Horizontally scale the number of primaries to better serve requests"),(0,a.kt)("li",{parentName:"ul"},"Vertically scale your current Redis nodes by allocating more memory"))),(0,a.kt)("li",{parentName:"ul"},"CPU bottleneck - throughput is low and impacting system performance",(0,a.kt)("ul",{parentName:"li"},(0,a.kt)("li",{parentName:"ul"},"Horizontally scale the number of primaries to better serve requests"),(0,a.kt)("li",{parentName:"ul"},"Vertically scale your current Redis nodes by allocating more CPUs"))),(0,a.kt)("li",{parentName:"ul"},"Over-provisioning - you have allocated too many resources for your cluster",(0,a.kt)("ul",{parentName:"li"},(0,a.kt)("li",{parentName:"ul"},"Scale down if it does not hurt the performance of your system"),(0,a.kt)("li",{parentName:"ul"},"Scale down the number of primaries to save on costs"),(0,a.kt)("li",{parentName:"ul"},"If you are running a Redis cluster with a high replication factor (RF), consider reducing it"),(0,a.kt)("li",{parentName:"ul"},"In multi-zone clusters, scaling down may reduce availability in the case of a zone outage")))),(0,a.kt)("h2",{id:"impact-of-scaling"},"Impact of scaling"),(0,a.kt)("p",null,"Scaling operations happen in real-time while the Redis cluster receives requests. They are computationally intensive, so expect a decrease in performance while the scaling operation takes place. The extent of the performance impact depends on the size of the data stored in Redis, as well as CPU utilization. See ",(0,a.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/key-migration"},"key migration")," for more information about how Redis keys are migrated from one primary to another during the scaling process."),(0,a.kt)("h3",{id:"resource-requirements"},"Resource Requirements"),(0,a.kt)("p",null,"Like the rolling update procedure, scaling up requires additional resources to create new Redis pods. In the case where there are insufficient resources to schedule new Redis pods, the pods will get stuck in ",(0,a.kt)("inlineCode",{parentName:"p"},"Pending")," state. 
If you find your newly created pods are in ",(0,a.kt)("inlineCode",{parentName:"p"},"Pending")," state, increase the memory + cpu allocated to your k8s nodes, or add more nodes to your worker pool."),(0,a.kt)("h2",{id:"scaling-primaries"},"Scaling primaries"),(0,a.kt)("p",null,"The first option for scaling your cluster is scaling the number of primaries. You can trigger a scaling operation by modifying the ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," field in ",(0,a.kt)("inlineCode",{parentName:"p"},"charts/node-for-redis/values.yaml")," and running ",(0,a.kt)("inlineCode",{parentName:"p"},"helm upgrade")," on your cluster."),(0,a.kt)("h3",{id:"scaling-up"},"Scaling up"),(0,a.kt)("p",null,"Scale up operations take place when the desired number of primaries is greater than the current number of primaries. We take the following actions for scale up operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"For the number of primaries added,\n 1. Create a new Redis pod.\n 2. Wait for the pod to become ready.\n\nOnce the number of desired Redis pods matches the current number of running pods,\n 3. Check if we have sufficient primaries. If not, promote replicas to primaries. This only happens when you scale up the number of primaries AND scale down RF.\n 4. Add the new Redis nodes to the selection of current primaries.\n 5. Place and attach replicas to their respective primaries.\n 6. Dispatch slots and migrate keys to the new primaries.\n")),(0,a.kt)("p",null,"After this last step, your cluster will be in normal operating state. The primaries nodes will have an equal number of slots and replica nodes will be properly attached."),(0,a.kt)("h4",{id:"example"},"Example"),(0,a.kt)("p",null,"Given a Redis cluster with the following config:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 3\nreplicationFactor: 1\n")),(0,a.kt)("p",null,"Assuming your helm release name is ",(0,a.kt)("inlineCode",{parentName:"p"},"redis-cluster"),", scale up ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=5\n")),(0,a.kt)("h3",{id:"scaling-down"},"Scaling down"),(0,a.kt)("p",null,"Scale down operations take place when the desired number of primaries is less than the current number of primaries. We take the following actions for scale down operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"For the number of primaries deleted,\n 1. Select one primary to remove.\n 2. Migrate keys from the primary to be removed to the other primaries. Slots are equally distributed across the remaining primaries.\n 3. Detach, forget, and delete the primary to be removed.\n\nOnce the number of desired Redis pods matches the current number of running pods,\n 4. Dispatch slots and migrate keys to the new primaries.\n 5. 
Place and attach replicas to their respective primaries.\n")),(0,a.kt)("p",null,"After this last step, your cluster will be in normal operating state."),(0,a.kt)("h4",{id:"example-1"},"Example"),(0,a.kt)("p",null,"Given a Redis cluster with the following config:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 5\nreplicationFactor: 1\n")),(0,a.kt)("p",null,"Scale down ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=3\n")),(0,a.kt)("h2",{id:"scaling-replication-factor"},"Scaling replication factor"),(0,a.kt)("p",null,"The second option for scaling your cluster is scaling RF. You can trigger a scaling operation by modifying the ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor")," field in ",(0,a.kt)("inlineCode",{parentName:"p"},"charts/node-for-redis/values.yaml")," and running ",(0,a.kt)("inlineCode",{parentName:"p"},"helm upgrade")," on your cluster. "),(0,a.kt)("h3",{id:"scaling-up-1"},"Scaling up"),(0,a.kt)("p",null,"Scale up operations for RF take place when the desired RF is greater than the current RF. We take the following actions for scale up operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"For the number of replicas added,\n 1. Create a new Redis pod.\n 2. Wait for the pod to become ready.\n\nOnce the number of desired Redis pods matches the current number of running pods,\n 3. Add the new Redis nodes to the selection of replicas.\n 4. Place and attach replicas to their respective primaries such that each primary has the same number of replicas.\n 5. Dispatch slots and migrate keys to the new primaries.\n")),(0,a.kt)("p",null,"After this step, your cluster will be in normal operating state."),(0,a.kt)("h4",{id:"example-2"},"Example"),(0,a.kt)("p",null,"Given a Redis cluster with the following config:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 3\nreplicationFactor: 1\n")),(0,a.kt)("p",null,"Scale up ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set replicationFactor=2\n")),(0,a.kt)("h3",{id:"scaling-down-1"},"Scaling down"),(0,a.kt)("p",null,"Scale down operations for RF take place when the desired RF is less than the current RF. We take the following actions for scale down operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"For the number of replicas deleted,\n For each primary in the cluster,\n 1. Calculate the difference between the current RF and desired RF.\n 2. If we do not have sufficient replicas for this primary, select new replicas and attach them to the primary.\n 3. If we have too many replicas, select replicas to delete. Detach, forget, and delete the replica to be removed.\n \nOnce the number of desired Redis pods matches the current number of running pods,\n 4. 
Place and attach replicas to their respective primaries.\n")),(0,a.kt)("p",null,"After this step, your cluster will be in normal operating state."),(0,a.kt)("h4",{id:"example-3"},"Example"),(0,a.kt)("p",null,"Given a Redis cluster with the following config:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 3\nreplicationFactor: 2\n")),(0,a.kt)("p",null,"Scale down ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set replicationFactor=1\n")),(0,a.kt)("h2",{id:"scaling-primaries-and-replication-factor"},"Scaling primaries and replication factor"),(0,a.kt)("p",null,"You may scale both the number of primaries and replication factor in a single ",(0,a.kt)("inlineCode",{parentName:"p"},"helm upgrade")," command. The number of pods created or deleted will be calculated and actions will be taken according to the algorithms described in the previous sections. The following is an example of scaling up ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," and ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor"),"."),(0,a.kt)("h4",{id:"example-1-1"},"Example 1"),(0,a.kt)("p",null,"Given a Redis cluster with the following config:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 3\nreplicationFactor: 1\n")),(0,a.kt)("p",null,"Increase ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," and ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=4 --set replicationFactor=2\n")),(0,a.kt)("hr",null),(0,a.kt)("h4",{id:"example-2-1"},"Example 2"),(0,a.kt)("p",null,"You may also scale up one field while scaling down the other:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 4\nreplicationFactor: 2\n")),(0,a.kt)("p",null,"Increase ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," and decrease ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=5 --set replicationFactor=1\n")))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[350],{3905:(e,r,t)=>{t.d(r,{Zo:()=>p,kt:()=>h});var n=t(7294);function a(e,r,t){return r in e?Object.defineProperty(e,r,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[r]=t,e}function i(e,r){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);r&&(n=n.filter((function(r){return Object.getOwnPropertyDescriptor(e,r).enumerable}))),t.push.apply(t,n)}return t}function l(e){for(var r=1;r=0||(a[t]=e[t]);return a}(e,r);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(a[t]=e[t])}return a}var s=n.createContext({}),c=function(e){var r=n.useContext(s),t=r;return e&&(t="function"==typeof e?e(r):l(l({},r),e)),t},p=function(e){var r=c(e.components);return n.createElement(s.Provider,{value:r},e.children)},u="mdxType",d={inlineCode:"code",wrapper:function(e){var r=e.children;return 
n.createElement(n.Fragment,{},r)}},m=n.forwardRef((function(e,r){var t=e.components,a=e.mdxType,i=e.originalType,s=e.parentName,p=o(e,["components","mdxType","originalType","parentName"]),u=c(t),m=a,h=u["".concat(s,".").concat(m)]||u[m]||d[m]||i;return t?n.createElement(h,l(l({ref:r},p),{},{components:t})):n.createElement(h,l({ref:r},p))}));function h(e,r){var t=arguments,a=r&&r.mdxType;if("string"==typeof e||a){var i=t.length,l=new Array(i);l[0]=m;var o={};for(var s in r)hasOwnProperty.call(r,s)&&(o[s]=r[s]);o.originalType=e,o[u]="string"==typeof e?e:a,l[1]=o;for(var c=2;c{t.r(r),t.d(r,{assets:()=>s,contentTitle:()=>l,default:()=>d,frontMatter:()=>i,metadata:()=>o,toc:()=>c});var n=t(7462),a=(t(7294),t(3905));const i={title:"Scaling Operations",slug:"/scaling"},l="Scaling Operations",o={unversionedId:"scaling",id:"scaling",title:"Scaling Operations",description:"Overview",source:"@site/docs/scaling.md",sourceDirName:".",slug:"/scaling",permalink:"/operator-for-redis-cluster/scaling",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/scaling.md",tags:[],version:"current",lastUpdatedAt:1691083274,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Scaling Operations",slug:"/scaling"},sidebar:"docs",previous:{title:"Redis Server Configuration",permalink:"/operator-for-redis-cluster/configuration"},next:{title:"Rolling Update Procedure",permalink:"/operator-for-redis-cluster/rolling-update"}},s={},c=[{value:"Overview",id:"overview",level:2},{value:"Impact of scaling",id:"impact-of-scaling",level:2},{value:"Resource Requirements",id:"resource-requirements",level:3},{value:"Scaling primaries",id:"scaling-primaries",level:2},{value:"Scaling up",id:"scaling-up",level:3},{value:"Example",id:"example",level:4},{value:"Scaling down",id:"scaling-down",level:3},{value:"Example",id:"example-1",level:4},{value:"Scaling replication factor",id:"scaling-replication-factor",level:2},{value:"Scaling up",id:"scaling-up-1",level:3},{value:"Example",id:"example-2",level:4},{value:"Scaling down",id:"scaling-down-1",level:3},{value:"Example",id:"example-3",level:4},{value:"Scaling primaries and replication factor",id:"scaling-primaries-and-replication-factor",level:2},{value:"Example 1",id:"example-1-1",level:4},{value:"Example 2",id:"example-2-1",level:4}],p={toc:c},u="wrapper";function d(e){let{components:r,...t}=e;return(0,a.kt)(u,(0,n.Z)({},p,t,{components:r,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"scaling-operations"},"Scaling Operations"),(0,a.kt)("h2",{id:"overview"},"Overview"),(0,a.kt)("p",null,"There are many reasons why you would want to scale the number of Redis nodes in your cluster. 
A few of the most common reasons are: "),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},"Memory pressure - the nodes in your cluster are close to full capacity (or are at fully capacity and evictions are causing the backend to take more traffic than desired)",(0,a.kt)("ul",{parentName:"li"},(0,a.kt)("li",{parentName:"ul"},"Horizontally scale the number of primaries to better serve requests"),(0,a.kt)("li",{parentName:"ul"},"Vertically scale your current Redis nodes by allocating more memory"))),(0,a.kt)("li",{parentName:"ul"},"CPU bottleneck - throughput is low and impacting system performance",(0,a.kt)("ul",{parentName:"li"},(0,a.kt)("li",{parentName:"ul"},"Horizontally scale the number of primaries to better serve requests"),(0,a.kt)("li",{parentName:"ul"},"Vertically scale your current Redis nodes by allocating more CPUs"))),(0,a.kt)("li",{parentName:"ul"},"Over-provisioning - you have allocated too many resources for your cluster",(0,a.kt)("ul",{parentName:"li"},(0,a.kt)("li",{parentName:"ul"},"Scale down if it does not hurt the performance of your system"),(0,a.kt)("li",{parentName:"ul"},"Scale down the number of primaries to save on costs"),(0,a.kt)("li",{parentName:"ul"},"If you are running a Redis cluster with a high replication factor (RF), consider reducing it"),(0,a.kt)("li",{parentName:"ul"},"In multi-zone clusters, scaling down may reduce availability in the case of a zone outage")))),(0,a.kt)("h2",{id:"impact-of-scaling"},"Impact of scaling"),(0,a.kt)("p",null,"Scaling operations happen in real-time while the Redis cluster receives requests. They are computationally intensive, so expect a decrease in performance while the scaling operation takes place. The extent of the performance impact depends on the size of the data stored in Redis, as well as CPU utilization. See ",(0,a.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/key-migration"},"key migration")," for more information about how Redis keys are migrated from one primary to another during the scaling process."),(0,a.kt)("h3",{id:"resource-requirements"},"Resource Requirements"),(0,a.kt)("p",null,"Like the rolling update procedure, scaling up requires additional resources to create new Redis pods. In the case where there are insufficient resources to schedule new Redis pods, the pods will get stuck in ",(0,a.kt)("inlineCode",{parentName:"p"},"Pending")," state. If you find your newly created pods are in ",(0,a.kt)("inlineCode",{parentName:"p"},"Pending")," state, increase the memory + cpu allocated to your k8s nodes, or add more nodes to your worker pool."),(0,a.kt)("h2",{id:"scaling-primaries"},"Scaling primaries"),(0,a.kt)("p",null,"The first option for scaling your cluster is scaling the number of primaries. You can trigger a scaling operation by modifying the ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," field in ",(0,a.kt)("inlineCode",{parentName:"p"},"charts/node-for-redis/values.yaml")," and running ",(0,a.kt)("inlineCode",{parentName:"p"},"helm upgrade")," on your cluster."),(0,a.kt)("h3",{id:"scaling-up"},"Scaling up"),(0,a.kt)("p",null,"Scale up operations take place when the desired number of primaries is greater than the current number of primaries. We take the following actions for scale up operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"For the number of primaries added,\n 1. Create a new Redis pod.\n 2. Wait for the pod to become ready.\n\nOnce the number of desired Redis pods matches the current number of running pods,\n 3. 
Check if we have sufficient primaries. If not, promote replicas to primaries. This only happens when you scale up the number of primaries AND scale down RF.\n 4. Add the new Redis nodes to the selection of current primaries.\n 5. Place and attach replicas to their respective primaries.\n 6. Dispatch slots and migrate keys to the new primaries.\n")),(0,a.kt)("p",null,"After this last step, your cluster will be in normal operating state. The primaries nodes will have an equal number of slots and replica nodes will be properly attached."),(0,a.kt)("h4",{id:"example"},"Example"),(0,a.kt)("p",null,"Given a Redis cluster with the following config:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 3\nreplicationFactor: 1\n")),(0,a.kt)("p",null,"Assuming your helm release name is ",(0,a.kt)("inlineCode",{parentName:"p"},"redis-cluster"),", scale up ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=5\n")),(0,a.kt)("h3",{id:"scaling-down"},"Scaling down"),(0,a.kt)("p",null,"Scale down operations take place when the desired number of primaries is less than the current number of primaries. We take the following actions for scale down operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"For the number of primaries deleted,\n 1. Select one primary to remove.\n 2. Migrate keys from the primary to be removed to the other primaries. Slots are equally distributed across the remaining primaries.\n 3. Detach, forget, and delete the primary to be removed.\n\nOnce the number of desired Redis pods matches the current number of running pods,\n 4. Dispatch slots and migrate keys to the new primaries.\n 5. Place and attach replicas to their respective primaries.\n")),(0,a.kt)("p",null,"After this last step, your cluster will be in normal operating state."),(0,a.kt)("h4",{id:"example-1"},"Example"),(0,a.kt)("p",null,"Given a Redis cluster with the following config:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 5\nreplicationFactor: 1\n")),(0,a.kt)("p",null,"Scale down ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=3\n")),(0,a.kt)("h2",{id:"scaling-replication-factor"},"Scaling replication factor"),(0,a.kt)("p",null,"The second option for scaling your cluster is scaling RF. You can trigger a scaling operation by modifying the ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor")," field in ",(0,a.kt)("inlineCode",{parentName:"p"},"charts/node-for-redis/values.yaml")," and running ",(0,a.kt)("inlineCode",{parentName:"p"},"helm upgrade")," on your cluster. "),(0,a.kt)("h3",{id:"scaling-up-1"},"Scaling up"),(0,a.kt)("p",null,"Scale up operations for RF take place when the desired RF is greater than the current RF. We take the following actions for scale up operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"For the number of replicas added,\n 1. Create a new Redis pod.\n 2. Wait for the pod to become ready.\n\nOnce the number of desired Redis pods matches the current number of running pods,\n 3. Add the new Redis nodes to the selection of replicas.\n 4. 
Place and attach replicas to their respective primaries such that each primary has the same number of replicas.\n 5. Dispatch slots and migrate keys to the new primaries.\n")),(0,a.kt)("p",null,"After this step, your cluster will be in normal operating state."),(0,a.kt)("h4",{id:"example-2"},"Example"),(0,a.kt)("p",null,"Given a Redis cluster with the following config:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 3\nreplicationFactor: 1\n")),(0,a.kt)("p",null,"Scale up ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set replicationFactor=2\n")),(0,a.kt)("h3",{id:"scaling-down-1"},"Scaling down"),(0,a.kt)("p",null,"Scale down operations for RF take place when the desired RF is less than the current RF. We take the following actions for scale down operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"For the number of replicas deleted,\n For each primary in the cluster,\n 1. Calculate the difference between the current RF and desired RF.\n 2. If we do not have sufficient replicas for this primary, select new replicas and attach them to the primary.\n 3. If we have too many replicas, select replicas to delete. Detach, forget, and delete the replica to be removed.\n \nOnce the number of desired Redis pods matches the current number of running pods,\n 4. Place and attach replicas to their respective primaries.\n")),(0,a.kt)("p",null,"After this step, your cluster will be in normal operating state."),(0,a.kt)("h4",{id:"example-3"},"Example"),(0,a.kt)("p",null,"Given a Redis cluster with the following config:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 3\nreplicationFactor: 2\n")),(0,a.kt)("p",null,"Scale down ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set replicationFactor=1\n")),(0,a.kt)("h2",{id:"scaling-primaries-and-replication-factor"},"Scaling primaries and replication factor"),(0,a.kt)("p",null,"You may scale both the number of primaries and replication factor in a single ",(0,a.kt)("inlineCode",{parentName:"p"},"helm upgrade")," command. The number of pods created or deleted will be calculated and actions will be taken according to the algorithms described in the previous sections. 
The following is an example of scaling up ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," and ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor"),"."),(0,a.kt)("h4",{id:"example-1-1"},"Example 1"),(0,a.kt)("p",null,"Given a Redis cluster with the following config:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 3\nreplicationFactor: 1\n")),(0,a.kt)("p",null,"Increase ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," and ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=4 --set replicationFactor=2\n")),(0,a.kt)("hr",null),(0,a.kt)("h4",{id:"example-2-1"},"Example 2"),(0,a.kt)("p",null,"You may also scale up one field while scaling down the other:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"numberOfPrimaries: 4\nreplicationFactor: 2\n")),(0,a.kt)("p",null,"Increase ",(0,a.kt)("inlineCode",{parentName:"p"},"numberOfPrimaries")," and decrease ",(0,a.kt)("inlineCode",{parentName:"p"},"replicationFactor")," by running the following:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=5 --set replicationFactor=1\n")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/8d193b98.04a07b21.js b/assets/js/8d193b98.20c777c8.js similarity index 99% rename from assets/js/8d193b98.04a07b21.js rename to assets/js/8d193b98.20c777c8.js index 8c9218e5..afd0b88d 100644 --- a/assets/js/8d193b98.04a07b21.js +++ b/assets/js/8d193b98.20c777c8.js @@ -1 +1 @@ -"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[505],{3905:(e,t,n)=>{n.d(t,{Zo:()=>s,kt:()=>f});var r=n(7294);function o(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var p=r.createContext({}),u=function(e){var t=r.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=u(e.components);return r.createElement(p.Provider,{value:t},e.children)},c="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},m=r.forwardRef((function(e,t){var n=e.components,o=e.mdxType,a=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),c=u(n),m=o,f=c["".concat(p,".").concat(m)]||c[m]||d[m]||a;return n?r.createElement(f,i(i({ref:t},s),{},{components:n})):r.createElement(f,i({ref:t},s))}));function f(e,t){var n=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var a=n.length,i=new Array(a);i[0]=m;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[c]="string"==typeof e?e:o,i[1]=l;for(var u=2;u{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>d,frontMatter:()=>a,metadata:()=>l,toc:()=>u});var r=n(7462),o=(n(7294),n(3905));const 
a={title:"Contributing",slug:"/contributing"},i="Contributing",l={unversionedId:"CONTRIBUTING",id:"CONTRIBUTING",title:"Contributing",description:"Set up your machine",source:"@site/docs/CONTRIBUTING.md",sourceDirName:".",slug:"/contributing",permalink:"/operator-for-redis-cluster/contributing",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/CONTRIBUTING.md",tags:[],version:"current",lastUpdatedAt:1691078446,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Contributing",slug:"/contributing"},sidebar:"docs",previous:{title:"Key Migration",permalink:"/operator-for-redis-cluster/key-migration"}},p={},u=[{value:"Set up your machine",id:"set-up-your-machine",level:2},{value:"Development process",id:"development-process",level:2},{value:"Create a branch",id:"create-a-branch",level:3},{value:"Commit your code",id:"commit-your-code",level:3},{value:"End-to-end tests",id:"end-to-end-tests",level:3},{value:"Submit a pull request",id:"submit-a-pull-request",level:3}],s={toc:u},c="wrapper";function d(e){let{components:t,...n}=e;return(0,o.kt)(c,(0,r.Z)({},s,n,{components:t,mdxType:"MDXLayout"}),(0,o.kt)("h1",{id:"contributing"},"Contributing"),(0,o.kt)("h2",{id:"set-up-your-machine"},"Set up your machine"),(0,o.kt)("p",null,"Refer to our ",(0,o.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/cookbook#installation"},"cookbook")," to learn how to set up your machine."),(0,o.kt)("h2",{id:"development-process"},"Development process"),(0,o.kt)("p",null,"This section assumes you have already set up your environment to build and install the Redis operator and cluster."),(0,o.kt)("h3",{id:"create-a-branch"},"Create a branch"),(0,o.kt)("p",null,"The first step to contributing is creating a branch off of the ",(0,o.kt)("inlineCode",{parentName:"p"},"main")," branch in your forked project. Branch names should be well formatted. Start your branch name with a type. Choose one of the following:\n",(0,o.kt)("inlineCode",{parentName:"p"},"feat"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"fix"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"bug"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"docs"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"style"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"refactor"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"perf"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"test"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"add"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"remove"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"move"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"bump"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"update"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"release")),(0,o.kt)("p",null,"Example:"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-console"},"$ git checkout -b feat/node-scaling\n")),(0,o.kt)("h3",{id:"commit-your-code"},"Commit your code"),(0,o.kt)("p",null,"Make your desired changes to the branch and then commit your work:"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-console"},'$ git add .\n$ git commit -m ""\n$ git push --set-upstream origin \n')),(0,o.kt)("p",null,"When you are ready to make a pull request, we suggest you run:"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-console"},'$ make generate\n//path/to/go/bin/controller-gen object paths="./..."\n$ make fmt\nfind . 
-name \'*.go\' -not -wholename \'./vendor/*\' | while read -r file; do gofmt -w -s "$file"; goimports -w "$file"; done\n$ make lint\ngolangci-lint run --enable exportloopref\n$ make test\n./go.test.sh\n...\n')),(0,o.kt)("p",null,"These steps will:"),(0,o.kt)("ol",null,(0,o.kt)("li",{parentName:"ol"},"Regenerate the RedisCluster CRD"),(0,o.kt)("li",{parentName:"ol"},"Format the code according to ",(0,o.kt)("inlineCode",{parentName:"li"},"gofmt")," standards"),(0,o.kt)("li",{parentName:"ol"},"Run the linter"),(0,o.kt)("li",{parentName:"ol"},"Run the unit tests")),(0,o.kt)("h3",{id:"end-to-end-tests"},"End-to-end tests"),(0,o.kt)("p",null,"To run the end-to-end tests, you need to have a running Kubernetes cluster. Follow the steps in the ",(0,o.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/cookbook#run-end-to-end-tests"},"cookbook"),"."),(0,o.kt)("h3",{id:"submit-a-pull-request"},"Submit a pull request"),(0,o.kt)("p",null,"Push your branch to your ",(0,o.kt)("inlineCode",{parentName:"p"},"redis-operator")," fork and open a pull request against the ",(0,o.kt)("inlineCode",{parentName:"p"},"main")," branch in the official project. When you open a PR, be sure to include a description explaining your changes, as well as"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-text"},"Resolves #\n")),(0,o.kt)("p",null,"We also ask that you add labels describing the t-shirt size of the task (S, M, L, XL) and the task type (enhancement, documentation, bug, etc.)."))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[505],{3905:(e,t,n)=>{n.d(t,{Zo:()=>s,kt:()=>f});var r=n(7294);function o(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var p=r.createContext({}),u=function(e){var t=r.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=u(e.components);return r.createElement(p.Provider,{value:t},e.children)},c="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},m=r.forwardRef((function(e,t){var n=e.components,o=e.mdxType,a=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),c=u(n),m=o,f=c["".concat(p,".").concat(m)]||c[m]||d[m]||a;return n?r.createElement(f,i(i({ref:t},s),{},{components:n})):r.createElement(f,i({ref:t},s))}));function f(e,t){var n=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var a=n.length,i=new Array(a);i[0]=m;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[c]="string"==typeof e?e:o,i[1]=l;for(var u=2;u{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>d,frontMatter:()=>a,metadata:()=>l,toc:()=>u});var r=n(7462),o=(n(7294),n(3905));const a={title:"Contributing",slug:"/contributing"},i="Contributing",l={unversionedId:"CONTRIBUTING",id:"CONTRIBUTING",title:"Contributing",description:"Set up your 
machine",source:"@site/docs/CONTRIBUTING.md",sourceDirName:".",slug:"/contributing",permalink:"/operator-for-redis-cluster/contributing",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/CONTRIBUTING.md",tags:[],version:"current",lastUpdatedAt:1691083274,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Contributing",slug:"/contributing"},sidebar:"docs",previous:{title:"Key Migration",permalink:"/operator-for-redis-cluster/key-migration"}},p={},u=[{value:"Set up your machine",id:"set-up-your-machine",level:2},{value:"Development process",id:"development-process",level:2},{value:"Create a branch",id:"create-a-branch",level:3},{value:"Commit your code",id:"commit-your-code",level:3},{value:"End-to-end tests",id:"end-to-end-tests",level:3},{value:"Submit a pull request",id:"submit-a-pull-request",level:3}],s={toc:u},c="wrapper";function d(e){let{components:t,...n}=e;return(0,o.kt)(c,(0,r.Z)({},s,n,{components:t,mdxType:"MDXLayout"}),(0,o.kt)("h1",{id:"contributing"},"Contributing"),(0,o.kt)("h2",{id:"set-up-your-machine"},"Set up your machine"),(0,o.kt)("p",null,"Refer to our ",(0,o.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/cookbook#installation"},"cookbook")," to learn how to set up your machine."),(0,o.kt)("h2",{id:"development-process"},"Development process"),(0,o.kt)("p",null,"This section assumes you have already set up your environment to build and install the Redis operator and cluster."),(0,o.kt)("h3",{id:"create-a-branch"},"Create a branch"),(0,o.kt)("p",null,"The first step to contributing is creating a branch off of the ",(0,o.kt)("inlineCode",{parentName:"p"},"main")," branch in your forked project. Branch names should be well formatted. Start your branch name with a type. Choose one of the following:\n",(0,o.kt)("inlineCode",{parentName:"p"},"feat"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"fix"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"bug"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"docs"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"style"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"refactor"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"perf"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"test"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"add"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"remove"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"move"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"bump"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"update"),", ",(0,o.kt)("inlineCode",{parentName:"p"},"release")),(0,o.kt)("p",null,"Example:"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-console"},"$ git checkout -b feat/node-scaling\n")),(0,o.kt)("h3",{id:"commit-your-code"},"Commit your code"),(0,o.kt)("p",null,"Make your desired changes to the branch and then commit your work:"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-console"},'$ git add .\n$ git commit -m ""\n$ git push --set-upstream origin \n')),(0,o.kt)("p",null,"When you are ready to make a pull request, we suggest you run:"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-console"},'$ make generate\n//path/to/go/bin/controller-gen object paths="./..."\n$ make fmt\nfind . 
-name \'*.go\' -not -wholename \'./vendor/*\' | while read -r file; do gofmt -w -s "$file"; goimports -w "$file"; done\n$ make lint\ngolangci-lint run --enable exportloopref\n$ make test\n./go.test.sh\n...\n')),(0,o.kt)("p",null,"These steps will:"),(0,o.kt)("ol",null,(0,o.kt)("li",{parentName:"ol"},"Regenerate the RedisCluster CRD"),(0,o.kt)("li",{parentName:"ol"},"Format the code according to ",(0,o.kt)("inlineCode",{parentName:"li"},"gofmt")," standards"),(0,o.kt)("li",{parentName:"ol"},"Run the linter"),(0,o.kt)("li",{parentName:"ol"},"Run the unit tests")),(0,o.kt)("h3",{id:"end-to-end-tests"},"End-to-end tests"),(0,o.kt)("p",null,"To run the end-to-end tests, you need to have a running Kubernetes cluster. Follow the steps in the ",(0,o.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/cookbook#run-end-to-end-tests"},"cookbook"),"."),(0,o.kt)("h3",{id:"submit-a-pull-request"},"Submit a pull request"),(0,o.kt)("p",null,"Push your branch to your ",(0,o.kt)("inlineCode",{parentName:"p"},"redis-operator")," fork and open a pull request against the ",(0,o.kt)("inlineCode",{parentName:"p"},"main")," branch in the official project. When you open a PR, be sure to include a description explaining your changes, as well as"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-text"},"Resolves #\n")),(0,o.kt)("p",null,"We also ask that you add labels describing the t-shirt size of the task (S, M, L, XL) and the task type (enhancement, documentation, bug, etc.)."))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/9ed00105.77f97e25.js b/assets/js/9ed00105.8df2f6a6.js similarity index 99% rename from assets/js/9ed00105.77f97e25.js rename to assets/js/9ed00105.8df2f6a6.js index b0ab9bf7..873cab76 100644 --- a/assets/js/9ed00105.77f97e25.js +++ b/assets/js/9ed00105.8df2f6a6.js @@ -1 +1 @@ -"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[4],{3905:(e,t,n)=>{n.d(t,{Zo:()=>c,kt:()=>m});var a=n(7294);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function r(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var l=a.createContext({}),u=function(e){var t=a.useContext(l),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},c=function(e){var t=u(e.components);return a.createElement(l.Provider,{value:t},e.children)},d="mdxType",p={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},f=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,o=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),d=u(n),f=i,m=d["".concat(l,".").concat(f)]||d[f]||p[f]||o;return n?a.createElement(m,r(r({ref:t},c),{},{components:n})):a.createElement(m,r({ref:t},c))}));function m(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var o=n.length,r=new Array(o);r[0]=f;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[d]="string"==typeof e?e:i,r[1]=s;for(var 
u=2;u{n.r(t),n.d(t,{assets:()=>l,contentTitle:()=>r,default:()=>p,frontMatter:()=>o,metadata:()=>s,toc:()=>u});var a=n(7462),i=(n(7294),n(3905));const o={title:"Redis Server Configuration",slug:"/configuration"},r="Redis Server Configuration",s={unversionedId:"configuration",id:"configuration",title:"Redis Server Configuration",description:"Overview",source:"@site/docs/configuration.md",sourceDirName:".",slug:"/configuration",permalink:"/operator-for-redis-cluster/configuration",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/configuration.md",tags:[],version:"current",lastUpdatedAt:1691078446,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Redis Server Configuration",slug:"/configuration"},sidebar:"docs",previous:{title:"Kubectl Plugin",permalink:"/operator-for-redis-cluster/kubectl-plugin"},next:{title:"Scaling Operations",permalink:"/operator-for-redis-cluster/scaling"}},l={},u=[{value:"Overview",id:"overview",level:2},{value:"Redis Cluster Configuration",id:"redis-cluster-configuration",level:2},{value:"Configuration Options",id:"configuration-options",level:2},{value:"Defaults",id:"defaults",level:3},{value:"Persistence",id:"persistence",level:3},{value:"Snapshotting",id:"snapshotting",level:4},{value:"Max Memory",id:"max-memory",level:3},{value:"Eviction Policies",id:"eviction-policies",level:3},{value:"Overriding redis.conf",id:"overriding-redisconf",level:2},{value:"Configuration Examples",id:"configuration-examples",level:3},{value:"Redis as a Database",id:"redis-as-a-database",level:4},{value:"Redis as a Cache",id:"redis-as-a-cache",level:4}],c={toc:u},d="wrapper";function p(e){let{components:t,...n}=e;return(0,i.kt)(d,(0,a.Z)({},c,n,{components:t,mdxType:"MDXLayout"}),(0,i.kt)("h1",{id:"redis-server-configuration"},"Redis Server Configuration"),(0,i.kt)("h2",{id:"overview"},"Overview"),(0,i.kt)("p",null,"A Redis server can be configured by providing a Redis configuration file called ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf"),". To read more about the format of this file, see the ",(0,i.kt)("a",{parentName:"p",href:"https://redis.io/topics/config"},"configuration documentation"),"."),(0,i.kt)("h2",{id:"redis-cluster-configuration"},"Redis Cluster Configuration"),(0,i.kt)("p",null,"The Redis operator manages clusters that operate in ",(0,i.kt)("strong",{parentName:"p"},"cluster mode"),". This means every node in the cluster specifies the ",(0,i.kt)("inlineCode",{parentName:"p"},"cluster-enabled yes")," configuration option. It also means that every node in the cluster will have the same configuration. You do not need to set ",(0,i.kt)("inlineCode",{parentName:"p"},"cluster-enabled")," explicitly in your configuration because we automatically add the setting if it is not present when a Redis pod starts."),(0,i.kt)("p",null,"Redis clusters that operate in cluster mode support data sharding, which is essential to ensuring high availability. See the ",(0,i.kt)("a",{parentName:"p",href:"https://redis.io/topics/cluster-spec"},"Redis cluster specification")," to learn more."),(0,i.kt)("h2",{id:"configuration-options"},"Configuration Options"),(0,i.kt)("p",null,"There are various configuration options you will want to consider whether you are using Redis as a cache or as a persistent database. 
We urge you to read the ",(0,i.kt)("a",{parentName:"p",href:"https://raw.githubusercontent.com/redis/redis/6.2/redis.conf"},"Redis configuration documentation")," to understand the tradeoffs for each option."),(0,i.kt)("h3",{id:"defaults"},"Defaults"),(0,i.kt)("p",null,"A Redis server is able to start without specifying a ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf")," configuration file or providing override configuration; it will instead use the default settings. We do not recommend using the defaults in a production environment, as your Redis database can quickly exceed the amount of memory allocated to your Redis pods. Our operator currently deploys and manages Redis clusters using Redis 6.2. Read the default configuration for ",(0,i.kt)("a",{parentName:"p",href:"https://raw.githubusercontent.com/redis/redis/6.2/redis.conf"},"Redis 6.2")," to learn more about the specific settings."),(0,i.kt)("h3",{id:"persistence"},"Persistence"),(0,i.kt)("p",null,"If you use Redis as a database, you will need to enable persistence. Redis provides multiple persistence options, such as Redis Database (RDB) and Append Only File (AOF). You can read more about the advantages and disadvantages of each persistence option in the ",(0,i.kt)("a",{parentName:"p",href:"https://redis.io/topics/persistence"},"persistence documentation"),"."),(0,i.kt)("h4",{id:"snapshotting"},"Snapshotting"),(0,i.kt)("p",null,"To quote the Redis documentation:"),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"By default Redis saves snapshots of the dataset on disk, in a binary file called dump.rdb. You can configure Redis to have it save the dataset every N seconds if there are at least M changes in the dataset, or you can manually call the SAVE or BGSAVE commands.")),(0,i.kt)("p",null,"Using the default settings, snapshotting will occur:"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"after 3600 sec (60 min) if at least 1 key changed"),(0,i.kt)("li",{parentName:"ul"},"after 300 sec (5 min) if at least 10 keys changed"),(0,i.kt)("li",{parentName:"ul"},"after 60 sec if at least 10000 keys changed")),(0,i.kt)("p",null,"Snapshots can be extremely useful for backups and faster restarts. We recommend configuring ",(0,i.kt)("inlineCode",{parentName:"p"},"save")," to a reasonable value depending on the number of requests per second your database processes."),(0,i.kt)("p",null,"If you use Redis as a cache, be sure to disable snapshotting by setting ",(0,i.kt)("inlineCode",{parentName:"p"},'save ""')," in ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf"),". For a large Redis cluster processing thousands of requests per second, disk can fill up fairly quickly with database snapshots. Disabling snapshotting will prevent Redis from dumping the entire dataset to disk all together."),(0,i.kt)("h3",{id:"max-memory"},"Max Memory"),(0,i.kt)("p",null,"To quote the Redis documentation:"),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"The ",(0,i.kt)("inlineCode",{parentName:"p"},"maxmemory")," configuration directive is used in order to configure Redis to use a specified amount of memory for the data set.")),(0,i.kt)("p",null,"We highly encourage setting ",(0,i.kt)("inlineCode",{parentName:"p"},"maxmemory")," to a value lower than the allocated memory to each Redis pod. By default, we set ",(0,i.kt)("inlineCode",{parentName:"p"},"maxmemory")," to 70% of the allocated Redis pod memory. 
You should change this value depending on how much additional memory the Redis process consumes doing other operations."),(0,i.kt)("h3",{id:"eviction-policies"},"Eviction Policies"),(0,i.kt)("p",null,"See ",(0,i.kt)("a",{parentName:"p",href:"https://redis.io/topics/lru-cache"},"Using Redis as an LRU cache")," to learn more about which ",(0,i.kt)("inlineCode",{parentName:"p"},"maxmemory-policy")," is best for your needs. If you use Redis as a database, you will likely want to keep the default ",(0,i.kt)("inlineCode",{parentName:"p"},"maxmemory-policy")," set to ",(0,i.kt)("inlineCode",{parentName:"p"},"noeviction"),"."),(0,i.kt)("h2",{id:"overriding-redisconf"},"Overriding redis.conf"),(0,i.kt)("p",null,"You have two separate options for overriding the default ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf")," when deploying a Redis cluster. The first is to specify your configuration as key-value pairs in ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.configuration.valueMap"),":"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},'redis:\n configuration:\n valueMap:\n maxmemory-policy: "volatile-lfu"\n maxmemory: "10Gb"\n ...\n')),(0,i.kt)("p",null,"Note: be sure to always quote values if you decide to use this approach."),(0,i.kt)("p",null,"The second option is to specify ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.configuration.file")," with the path to your ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf")," file. For example:"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},'redis:\n configuration:\n file: "/path/to/redis.conf"\n')),(0,i.kt)("p",null,"Your ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf")," file should have the same format as any Redis configuration file:"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-text"},"maxmemory-policy volatile-lfu\nmaxmemory 10gb\n...\n")),(0,i.kt)("p",null,"Note: do ",(0,i.kt)("strong",{parentName:"p"},"not")," quote values in Redis configuration files."),(0,i.kt)("h3",{id:"configuration-examples"},"Configuration Examples"),(0,i.kt)("h4",{id:"redis-as-a-database"},"Redis as a Database"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},'redis:\n configuration:\n valueMap:\n maxmemory-policy: "noeviction" # Do not evict keys even if memory is full\n maxmemory: "100gb"\n save: "3600 1 300 10 60 10000" # Enable RDB persistence to perform snapshots (see Snapshotting section)\n appendonly: "yes" # Enable AOF persistence\n \n')),(0,i.kt)("h4",{id:"redis-as-a-cache"},"Redis as a Cache"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},'redis:\n configuration:\n valueMap:\n maxmemory-policy: "volatile-lfu" # Expire keys based on least frequently used policy\n maxmemory: "10gb"\n save: "" # Disable saving snapshots of the database to disk\n lazyfree-lazy-eviction: "yes" # Asynchronously evict keys\n lazyfree-lazy-expire: "yes" # Asynchronously delete expired keys\n lazyfree-lazy-server-del: "yes" # Asynchronously delete keys during specific server commands\n replica-lazy-flush: "yes" # Asynchronously flush keys after replica resynchronization\n cluster-require-full-coverage: "no" # Accept queries even when only part of key space is covered\n cluster-allow-reads-when-down: "yes" # Allow nodes to serve reads while cluster is down\n')))}p.isMDXComponent=!0}}]); \ No newline at end of file +"use 
strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[4],{3905:(e,t,n)=>{n.d(t,{Zo:()=>c,kt:()=>m});var a=n(7294);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function r(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var l=a.createContext({}),u=function(e){var t=a.useContext(l),n=t;return e&&(n="function"==typeof e?e(t):r(r({},t),e)),n},c=function(e){var t=u(e.components);return a.createElement(l.Provider,{value:t},e.children)},d="mdxType",p={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},f=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,o=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),d=u(n),f=i,m=d["".concat(l,".").concat(f)]||d[f]||p[f]||o;return n?a.createElement(m,r(r({ref:t},c),{},{components:n})):a.createElement(m,r({ref:t},c))}));function m(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var o=n.length,r=new Array(o);r[0]=f;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[d]="string"==typeof e?e:i,r[1]=s;for(var u=2;u{n.r(t),n.d(t,{assets:()=>l,contentTitle:()=>r,default:()=>p,frontMatter:()=>o,metadata:()=>s,toc:()=>u});var a=n(7462),i=(n(7294),n(3905));const o={title:"Redis Server Configuration",slug:"/configuration"},r="Redis Server Configuration",s={unversionedId:"configuration",id:"configuration",title:"Redis Server Configuration",description:"Overview",source:"@site/docs/configuration.md",sourceDirName:".",slug:"/configuration",permalink:"/operator-for-redis-cluster/configuration",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/configuration.md",tags:[],version:"current",lastUpdatedAt:1691083274,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Redis Server Configuration",slug:"/configuration"},sidebar:"docs",previous:{title:"Kubectl Plugin",permalink:"/operator-for-redis-cluster/kubectl-plugin"},next:{title:"Scaling Operations",permalink:"/operator-for-redis-cluster/scaling"}},l={},u=[{value:"Overview",id:"overview",level:2},{value:"Redis Cluster Configuration",id:"redis-cluster-configuration",level:2},{value:"Configuration Options",id:"configuration-options",level:2},{value:"Defaults",id:"defaults",level:3},{value:"Persistence",id:"persistence",level:3},{value:"Snapshotting",id:"snapshotting",level:4},{value:"Max Memory",id:"max-memory",level:3},{value:"Eviction Policies",id:"eviction-policies",level:3},{value:"Overriding redis.conf",id:"overriding-redisconf",level:2},{value:"Configuration Examples",id:"configuration-examples",level:3},{value:"Redis as a Database",id:"redis-as-a-database",level:4},{value:"Redis as a Cache",id:"redis-as-a-cache",level:4}],c={toc:u},d="wrapper";function p(e){let{components:t,...n}=e;return(0,i.kt)(d,(0,a.Z)({},c,n,{components:t,mdxType:"MDXLayout"}),(0,i.kt)("h1",{id:"redis-server-configuration"},"Redis Server Configuration"),(0,i.kt)("h2",{id:"overview"},"Overview"),(0,i.kt)("p",null,"A Redis server can be configured by providing a Redis configuration file called 
",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf"),". To read more about the format of this file, see the ",(0,i.kt)("a",{parentName:"p",href:"https://redis.io/topics/config"},"configuration documentation"),"."),(0,i.kt)("h2",{id:"redis-cluster-configuration"},"Redis Cluster Configuration"),(0,i.kt)("p",null,"The Redis operator manages clusters that operate in ",(0,i.kt)("strong",{parentName:"p"},"cluster mode"),". This means every node in the cluster specifies the ",(0,i.kt)("inlineCode",{parentName:"p"},"cluster-enabled yes")," configuration option. It also means that every node in the cluster will have the same configuration. You do not need to set ",(0,i.kt)("inlineCode",{parentName:"p"},"cluster-enabled")," explicitly in your configuration because we automatically add the setting if it is not present when a Redis pod starts."),(0,i.kt)("p",null,"Redis clusters that operate in cluster mode support data sharding, which is essential to ensuring high availability. See the ",(0,i.kt)("a",{parentName:"p",href:"https://redis.io/topics/cluster-spec"},"Redis cluster specification")," to learn more."),(0,i.kt)("h2",{id:"configuration-options"},"Configuration Options"),(0,i.kt)("p",null,"There are various configuration options you will want to consider whether you are using Redis as a cache or as a persistent database. We urge you to read the ",(0,i.kt)("a",{parentName:"p",href:"https://raw.githubusercontent.com/redis/redis/6.2/redis.conf"},"Redis configuration documentation")," to understand the tradeoffs for each option."),(0,i.kt)("h3",{id:"defaults"},"Defaults"),(0,i.kt)("p",null,"A Redis server is able to start without specifying a ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf")," configuration file or providing override configuration; it will instead use the default settings. We do not recommend using the defaults in a production environment, as your Redis database can quickly exceed the amount of memory allocated to your Redis pods. Our operator currently deploys and manages Redis clusters using Redis 6.2. Read the default configuration for ",(0,i.kt)("a",{parentName:"p",href:"https://raw.githubusercontent.com/redis/redis/6.2/redis.conf"},"Redis 6.2")," to learn more about the specific settings."),(0,i.kt)("h3",{id:"persistence"},"Persistence"),(0,i.kt)("p",null,"If you use Redis as a database, you will need to enable persistence. Redis provides multiple persistence options, such as Redis Database (RDB) and Append Only File (AOF). You can read more about the advantages and disadvantages of each persistence option in the ",(0,i.kt)("a",{parentName:"p",href:"https://redis.io/topics/persistence"},"persistence documentation"),"."),(0,i.kt)("h4",{id:"snapshotting"},"Snapshotting"),(0,i.kt)("p",null,"To quote the Redis documentation:"),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"By default Redis saves snapshots of the dataset on disk, in a binary file called dump.rdb. You can configure Redis to have it save the dataset every N seconds if there are at least M changes in the dataset, or you can manually call the SAVE or BGSAVE commands.")),(0,i.kt)("p",null,"Using the default settings, snapshotting will occur:"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"after 3600 sec (60 min) if at least 1 key changed"),(0,i.kt)("li",{parentName:"ul"},"after 300 sec (5 min) if at least 10 keys changed"),(0,i.kt)("li",{parentName:"ul"},"after 60 sec if at least 10000 keys changed")),(0,i.kt)("p",null,"Snapshots can be extremely useful for backups and faster restarts. 
We recommend configuring ",(0,i.kt)("inlineCode",{parentName:"p"},"save")," to a reasonable value depending on the number of requests per second your database processes."),(0,i.kt)("p",null,"If you use Redis as a cache, be sure to disable snapshotting by setting ",(0,i.kt)("inlineCode",{parentName:"p"},'save ""')," in ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf"),". For a large Redis cluster processing thousands of requests per second, disk can fill up fairly quickly with database snapshots. Disabling snapshotting will prevent Redis from dumping the entire dataset to disk all together."),(0,i.kt)("h3",{id:"max-memory"},"Max Memory"),(0,i.kt)("p",null,"To quote the Redis documentation:"),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"The ",(0,i.kt)("inlineCode",{parentName:"p"},"maxmemory")," configuration directive is used in order to configure Redis to use a specified amount of memory for the data set.")),(0,i.kt)("p",null,"We highly encourage setting ",(0,i.kt)("inlineCode",{parentName:"p"},"maxmemory")," to a value lower than the allocated memory to each Redis pod. By default, we set ",(0,i.kt)("inlineCode",{parentName:"p"},"maxmemory")," to 70% of the allocated Redis pod memory. You should change this value depending on how much additional memory the Redis process consumes doing other operations."),(0,i.kt)("h3",{id:"eviction-policies"},"Eviction Policies"),(0,i.kt)("p",null,"See ",(0,i.kt)("a",{parentName:"p",href:"https://redis.io/topics/lru-cache"},"Using Redis as an LRU cache")," to learn more about which ",(0,i.kt)("inlineCode",{parentName:"p"},"maxmemory-policy")," is best for your needs. If you use Redis as a database, you will likely want to keep the default ",(0,i.kt)("inlineCode",{parentName:"p"},"maxmemory-policy")," set to ",(0,i.kt)("inlineCode",{parentName:"p"},"noeviction"),"."),(0,i.kt)("h2",{id:"overriding-redisconf"},"Overriding redis.conf"),(0,i.kt)("p",null,"You have two separate options for overriding the default ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf")," when deploying a Redis cluster. The first is to specify your configuration as key-value pairs in ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.configuration.valueMap"),":"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},'redis:\n configuration:\n valueMap:\n maxmemory-policy: "volatile-lfu"\n maxmemory: "10Gb"\n ...\n')),(0,i.kt)("p",null,"Note: be sure to always quote values if you decide to use this approach."),(0,i.kt)("p",null,"The second option is to specify ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.configuration.file")," with the path to your ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf")," file. 
For example:"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},'redis:\n configuration:\n file: "/path/to/redis.conf"\n')),(0,i.kt)("p",null,"Your ",(0,i.kt)("inlineCode",{parentName:"p"},"redis.conf")," file should have the same format as any Redis configuration file:"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-text"},"maxmemory-policy volatile-lfu\nmaxmemory 10gb\n...\n")),(0,i.kt)("p",null,"Note: do ",(0,i.kt)("strong",{parentName:"p"},"not")," quote values in Redis configuration files."),(0,i.kt)("h3",{id:"configuration-examples"},"Configuration Examples"),(0,i.kt)("h4",{id:"redis-as-a-database"},"Redis as a Database"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},'redis:\n configuration:\n valueMap:\n maxmemory-policy: "noeviction" # Do not evict keys even if memory is full\n maxmemory: "100gb"\n save: "3600 1 300 10 60 10000" # Enable RDB persistence to perform snapshots (see Snapshotting section)\n appendonly: "yes" # Enable AOF persistence\n \n')),(0,i.kt)("h4",{id:"redis-as-a-cache"},"Redis as a Cache"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-yaml"},'redis:\n configuration:\n valueMap:\n maxmemory-policy: "volatile-lfu" # Expire keys based on least frequently used policy\n maxmemory: "10gb"\n save: "" # Disable saving snapshots of the database to disk\n lazyfree-lazy-eviction: "yes" # Asynchronously evict keys\n lazyfree-lazy-expire: "yes" # Asynchronously delete expired keys\n lazyfree-lazy-server-del: "yes" # Asynchronously delete keys during specific server commands\n replica-lazy-flush: "yes" # Asynchronously flush keys after replica resynchronization\n cluster-require-full-coverage: "no" # Accept queries even when only part of key space is covered\n cluster-allow-reads-when-down: "yes" # Allow nodes to serve reads while cluster is down\n')))}p.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/b9207088.0df17469.js b/assets/js/b9207088.f80f3130.js similarity index 99% rename from assets/js/b9207088.0df17469.js rename to assets/js/b9207088.f80f3130.js index 5f47392d..ad6fe8b8 100644 --- a/assets/js/b9207088.0df17469.js +++ b/assets/js/b9207088.f80f3130.js @@ -1 +1 @@ -"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[978],{3905:(e,t,r)=>{r.d(t,{Zo:()=>d,kt:()=>m});var o=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function l(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function a(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var i=o.createContext({}),c=function(e){var t=o.useContext(i),r=t;return e&&(r="function"==typeof e?e(t):a(a({},t),e)),r},d=function(e){var t=c(e.components);return o.createElement(i.Provider,{value:t},e.children)},u="mdxType",p={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},k=o.forwardRef((function(e,t){var r=e.components,n=e.mdxType,l=e.originalType,i=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),u=c(r),k=n,m=u["".concat(i,".").concat(k)]||u[k]||p[k]||l;return 
r?o.createElement(m,a(a({ref:t},d),{},{components:r})):o.createElement(m,a({ref:t},d))}));function m(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var l=r.length,a=new Array(l);a[0]=k;var s={};for(var i in t)hasOwnProperty.call(t,i)&&(s[i]=t[i]);s.originalType=e,s[u]="string"==typeof e?e:n,a[1]=s;for(var c=2;c{r.r(t),r.d(t,{assets:()=>i,contentTitle:()=>a,default:()=>p,frontMatter:()=>l,metadata:()=>s,toc:()=>c});var o=r(7462),n=(r(7294),r(3905));const l={title:"Cookbook",slug:"/cookbook"},a="Cookbook",s={unversionedId:"cookbook",id:"cookbook",title:"Cookbook",description:"Installation",source:"@site/docs/cookbook.md",sourceDirName:".",slug:"/cookbook",permalink:"/operator-for-redis-cluster/cookbook",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/cookbook.md",tags:[],version:"current",lastUpdatedAt:1691078446,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Cookbook",slug:"/cookbook"},sidebar:"docs",previous:{title:"Home",permalink:"/operator-for-redis-cluster/"},next:{title:"Kubectl Plugin",permalink:"/operator-for-redis-cluster/kubectl-plugin"}},i={},c=[{value:"Installation",id:"installation",level:2},{value:"Required Dependencies",id:"required-dependencies",level:3},{value:"Recommended Dependencies",id:"recommended-dependencies",level:3},{value:"Download and build the source code",id:"download-and-build-the-source-code",level:3},{value:"Create a Kubernetes cluster",id:"create-a-kubernetes-cluster",level:2},{value:"Deploy a Redis operator",id:"deploy-a-redis-operator",level:3},{value:"Deploy a Redis cluster",id:"deploy-a-redis-cluster",level:3},{value:"Clean up your environment",id:"clean-up-your-environment",level:3},{value:"Run end-to-end tests",id:"run-end-to-end-tests",level:2}],d={toc:c},u="wrapper";function p(e){let{components:t,...r}=e;return(0,n.kt)(u,(0,o.Z)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("h1",{id:"cookbook"},"Cookbook"),(0,n.kt)("h2",{id:"installation"},"Installation"),(0,n.kt)("p",null,"Operator for Redis Cluster is written in ",(0,n.kt)("a",{parentName:"p",href:"https://golang.org/"},"Go"),"."),(0,n.kt)("h3",{id:"required-dependencies"},"Required Dependencies"),(0,n.kt)("ul",null,(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("inlineCode",{parentName:"li"},"make")),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("a",{parentName:"li",href:"https://golang.org/doc/install"},"Go 1.17+")),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("a",{parentName:"li",href:"https://www.docker.com/"},"Docker")),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("a",{parentName:"li",href:"https://helm.sh"},"Helm 3"))),(0,n.kt)("h3",{id:"recommended-dependencies"},"Recommended Dependencies"),(0,n.kt)("ul",null,(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("a",{parentName:"li",href:"https://kind.sigs.k8s.io/"},"Kind")," or ",(0,n.kt)("a",{parentName:"li",href:"https://github.com/kubernetes/minikube"},"Minikube")),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("a",{parentName:"li",href:"https://github.com/golangci/golangci-lint"},"golangci-lint"))),(0,n.kt)("h3",{id:"download-and-build-the-source-code"},"Download and build the source code"),(0,n.kt)("p",null,"Start by making a fork of the ",(0,n.kt)("inlineCode",{parentName:"p"},"operator-for-redis-cluster")," repository. 
Then, clone your forked repo:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ git clone git@github.com:/operator-for-redis-cluster.git\nCloning into 'operator-for-redis-cluster'...\n$ cd operator-for-redis-cluster\n")),(0,n.kt)("p",null,"Build the project:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},'$ make build\nCGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:33 -s" -o bin/operator ./cmd/operator\nCGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:35 -s" -o bin/node ./cmd/node\nCGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:37 -s" -o bin/metrics ./cmd/metrics\n')),(0,n.kt)("p",null,"Run the test suite to make sure everything works:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ make test\n./go.test.sh\nok github.com/IBM/operator-for-redis-cluster/pkg/controller 5.162s coverage: 33.1% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/controller/clustering 0.711s coverage: 75.6% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/controller/pod 1.726s coverage: 40.0% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/controller/sanitycheck 0.631s coverage: 21.5% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/garbagecollector 1.740s coverage: 75.0% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/redis 0.728s coverage: 22.4% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/redis/fake 0.148s coverage: 85.8% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/redisnode 1.924s coverage: 43.4% of statements\n")),(0,n.kt)("p",null,"Install the kubectl Redis cluster plugin (more info ",(0,n.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/kubectl-plugin"},"here"),")"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ make plugin\n")),(0,n.kt)("h2",{id:"create-a-kubernetes-cluster"},"Create a Kubernetes cluster"),(0,n.kt)("p",null,"To run the Redis operator, you need to have a running Kubernetes cluster. You can use local k8s cluster frameworks such as ",(0,n.kt)("inlineCode",{parentName:"p"},"kind")," or ",(0,n.kt)("inlineCode",{parentName:"p"},"minikube"),". 
Use the following guide to install a ",(0,n.kt)("inlineCode",{parentName:"p"},"kind")," cluster similar to what we use in our e2e tests."),(0,n.kt)("p",null,"From the project root directory, create your kind cluster using the e2e test configuration:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kind create cluster --config ./test/e2e/kind_config.yml\n")),(0,n.kt)("p",null,"Build the required docker images:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"make container PREFIX= TAG=latest\n")),(0,n.kt)("p",null,"Once the kind cluster is up and running, load the images into the kind cluster:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kind load docker-image operator-for-redis:latest\n$ kind load docker-image node-for-redis:latest\n$ kind load docker-image metrics-for-redis:latest\n")),(0,n.kt)("h3",{id:"deploy-a-redis-operator"},"Deploy a Redis operator"),(0,n.kt)("p",null,"Install the ",(0,n.kt)("inlineCode",{parentName:"p"},"operator-for-redis")," Helm chart:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ helm install op charts/operator-for-redis --wait --set image.repository=operator-for-redis --set image.tag=latest\nNAME: op\nLAST DEPLOYED: Thu Oct 21 15:11:51 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nTEST SUITE: None\n")),(0,n.kt)("p",null,"Confirm that the operator is running properly:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kubectl get pods\nNAME READY STATUS RESTARTS AGE\nop-operator-for-redis-64dbfb4b59-xjttw 1/1 Running 0 31s\n")),(0,n.kt)("h3",{id:"deploy-a-redis-cluster"},"Deploy a Redis cluster"),(0,n.kt)("p",null,"Install the ",(0,n.kt)("inlineCode",{parentName:"p"},"node-for-redis")," Helm chart:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ helm install --wait cluster charts/node-for-redis --set image.repository=node-for-redis --set image.tag=latest\nNAME: cluster\nLAST DEPLOYED: Thu Oct 21 15:12:05 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nTEST SUITE: None\n")),(0,n.kt)("p",null,"Check the cluster status:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kubectl rc\n POD NAME IP NODE ID ZONE USED MEMORY MAX MEMORY KEYS SLOTS\n + rediscluster-cluster-node-for-redis-2h92v 10.244.1.89 172.18.0.3 5606ea9ab09678124a4b17de10ab92a78aac0b4d dal13 35.55M 10.95G 5462-10923\n | rediscluster-cluster-node-for-redis-nf24b 10.244.2.89 172.18.0.2 6840c0e5db16ebf073f57c67a6487c1c7f0d12d1 dal10 2.62M 10.95G\n + rediscluster-cluster-node-for-redis-4h4s2 10.244.2.88 172.18.0.2 8a2f2db39b85cf059e88dc80d7c9cafefac94de0 dal10 34.63M 10.95G 10924-16383\n | rediscluster-cluster-node-for-redis-77bt2 10.244.3.87 172.18.0.4 74582d0e0cedb458e81f1e9d4f32cdc3f5e9399b dal12 2.60M 10.95G\n + rediscluster-cluster-node-for-redis-dh6pt 10.244.3.86 172.18.0.4 81f5c13bec9a0545a62de08b2a309a87d29855c7 dal12 2.83M 10.95G 0-5461\n | rediscluster-cluster-node-for-redis-jnh2h 10.244.1.88 172.18.0.3 ffae381633377414597731597518529255fd9b69 dal13 2.64M 10.95G\n\n NAME NAMESPACE PODS OPS STATUS REDIS STATUS NB PRIMARY REPLICATION ZONE SKEW\n cluster-node-for-redis default 6/6/6 ClusterOK OK 3/3 1-1/1 0/0/BALANCED\n")),(0,n.kt)("h3",{id:"clean-up-your-environment"},"Clean up your environment"),(0,n.kt)("p",null,"Delete the Redis 
cluster:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},'$ helm uninstall cluster\nrelease "cluster" deleted\n')),(0,n.kt)("p",null,"Delete the Redis operator:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},'$ helm uninstall op\nrelease "op" deleted\n')),(0,n.kt)("p",null,"Ensure all pods have been deleted:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kubectl get pods\nNo resources found in default namespace.\n")),(0,n.kt)("h2",{id:"run-end-to-end-tests"},"Run end-to-end tests"),(0,n.kt)("p",null,"If you followed the steps for creating a ",(0,n.kt)("inlineCode",{parentName:"p"},"kind")," cluster with the e2e test configuration, then running the e2e tests is simple."),(0,n.kt)("p",null,"Build the required docker images:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"make container PREFIX=ibmcom/ TAG=local\nmake container-node PREFIX=ibmcom/ TAG=new\n")),(0,n.kt)("p",null,"Note that we need both ",(0,n.kt)("inlineCode",{parentName:"p"},"local")," and ",(0,n.kt)("inlineCode",{parentName:"p"},"new")," image tags for a rolling update e2e test case."),(0,n.kt)("p",null,"Load the required images into the kind cluster:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kind load docker-image ibmcom/operator-for-redis:local\n$ kind load docker-image ibmcom/node-for-redis:local\n$ kind load docker-image ibmcom/node-for-redis:new\n")),(0,n.kt)("p",null,"Once the kind cluster is up and running, deploy the ",(0,n.kt)("inlineCode",{parentName:"p"},"operator-for-redis")," Helm chart:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ helm install op charts/operator-for-redis --wait --set image.repository=ibmcom/operator-for-redis --set image.tag=local\nNAME: op\nLAST DEPLOYED: Thu Oct 21 15:11:51 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nTEST SUITE: None\n")),(0,n.kt)("p",null,"When the ",(0,n.kt)("inlineCode",{parentName:"p"},"operator-for-redis")," pod is up and running, you can start the e2e regression:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},'$ go test -timeout 30m ./test/e2e --kubeconfig=$HOME/.kube/config --ginkgo.v --test.v\nRunning Suite: RedisCluster Suite\n=================================\nRandom Seed: 1634845111\nWill run 11 of 11 specs\n\nOct 21 15:38:31.261: INFO: KubeconfigPath-> "/Users/kscharm/.kube/config"\nOct 21 15:38:31.295: INFO: Check whether RedisCluster resource is registered...\nRedisCluster CRUD operations\n should create a RedisCluster\n \n...\n\nRan 11 of 11 Specs in 517.299 seconds\nSUCCESS! 
-- 11 Passed | 0 Failed | 0 Pending | 0 Skipped\nPASS\nok github.com/IBM/operator-for-redis-cluster/test/e2e 517.776s\n')))}p.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[978],{3905:(e,t,r)=>{r.d(t,{Zo:()=>d,kt:()=>m});var o=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function l(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function a(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var i=o.createContext({}),c=function(e){var t=o.useContext(i),r=t;return e&&(r="function"==typeof e?e(t):a(a({},t),e)),r},d=function(e){var t=c(e.components);return o.createElement(i.Provider,{value:t},e.children)},u="mdxType",p={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},k=o.forwardRef((function(e,t){var r=e.components,n=e.mdxType,l=e.originalType,i=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),u=c(r),k=n,m=u["".concat(i,".").concat(k)]||u[k]||p[k]||l;return r?o.createElement(m,a(a({ref:t},d),{},{components:r})):o.createElement(m,a({ref:t},d))}));function m(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var l=r.length,a=new Array(l);a[0]=k;var s={};for(var i in t)hasOwnProperty.call(t,i)&&(s[i]=t[i]);s.originalType=e,s[u]="string"==typeof e?e:n,a[1]=s;for(var c=2;c{r.r(t),r.d(t,{assets:()=>i,contentTitle:()=>a,default:()=>p,frontMatter:()=>l,metadata:()=>s,toc:()=>c});var o=r(7462),n=(r(7294),r(3905));const l={title:"Cookbook",slug:"/cookbook"},a="Cookbook",s={unversionedId:"cookbook",id:"cookbook",title:"Cookbook",description:"Installation",source:"@site/docs/cookbook.md",sourceDirName:".",slug:"/cookbook",permalink:"/operator-for-redis-cluster/cookbook",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/cookbook.md",tags:[],version:"current",lastUpdatedAt:1691083274,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Cookbook",slug:"/cookbook"},sidebar:"docs",previous:{title:"Home",permalink:"/operator-for-redis-cluster/"},next:{title:"Kubectl Plugin",permalink:"/operator-for-redis-cluster/kubectl-plugin"}},i={},c=[{value:"Installation",id:"installation",level:2},{value:"Required Dependencies",id:"required-dependencies",level:3},{value:"Recommended Dependencies",id:"recommended-dependencies",level:3},{value:"Download and build the source code",id:"download-and-build-the-source-code",level:3},{value:"Create a Kubernetes cluster",id:"create-a-kubernetes-cluster",level:2},{value:"Deploy a Redis operator",id:"deploy-a-redis-operator",level:3},{value:"Deploy a Redis cluster",id:"deploy-a-redis-cluster",level:3},{value:"Clean up your environment",id:"clean-up-your-environment",level:3},{value:"Run end-to-end tests",id:"run-end-to-end-tests",level:2}],d={toc:c},u="wrapper";function p(e){let{components:t,...r}=e;return(0,n.kt)(u,(0,o.Z)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("h1",{id:"cookbook"},"Cookbook"),(0,n.kt)("h2",{id:"installation"},"Installation"),(0,n.kt)("p",null,"Operator for Redis Cluster is written in 
",(0,n.kt)("a",{parentName:"p",href:"https://golang.org/"},"Go"),"."),(0,n.kt)("h3",{id:"required-dependencies"},"Required Dependencies"),(0,n.kt)("ul",null,(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("inlineCode",{parentName:"li"},"make")),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("a",{parentName:"li",href:"https://golang.org/doc/install"},"Go 1.17+")),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("a",{parentName:"li",href:"https://www.docker.com/"},"Docker")),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("a",{parentName:"li",href:"https://helm.sh"},"Helm 3"))),(0,n.kt)("h3",{id:"recommended-dependencies"},"Recommended Dependencies"),(0,n.kt)("ul",null,(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("a",{parentName:"li",href:"https://kind.sigs.k8s.io/"},"Kind")," or ",(0,n.kt)("a",{parentName:"li",href:"https://github.com/kubernetes/minikube"},"Minikube")),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("a",{parentName:"li",href:"https://github.com/golangci/golangci-lint"},"golangci-lint"))),(0,n.kt)("h3",{id:"download-and-build-the-source-code"},"Download and build the source code"),(0,n.kt)("p",null,"Start by making a fork of the ",(0,n.kt)("inlineCode",{parentName:"p"},"operator-for-redis-cluster")," repository. Then, clone your forked repo:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ git clone git@github.com:/operator-for-redis-cluster.git\nCloning into 'operator-for-redis-cluster'...\n$ cd operator-for-redis-cluster\n")),(0,n.kt)("p",null,"Build the project:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},'$ make build\nCGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:33 -s" -o bin/operator ./cmd/operator\nCGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:35 -s" -o bin/node ./cmd/node\nCGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:37 -s" -o bin/metrics ./cmd/metrics\n')),(0,n.kt)("p",null,"Run the test suite to make sure everything works:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ make test\n./go.test.sh\nok github.com/IBM/operator-for-redis-cluster/pkg/controller 5.162s coverage: 33.1% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/controller/clustering 0.711s coverage: 75.6% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/controller/pod 1.726s coverage: 40.0% of 
statements\nok github.com/IBM/operator-for-redis-cluster/pkg/controller/sanitycheck 0.631s coverage: 21.5% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/garbagecollector 1.740s coverage: 75.0% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/redis 0.728s coverage: 22.4% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/redis/fake 0.148s coverage: 85.8% of statements\nok github.com/IBM/operator-for-redis-cluster/pkg/redisnode 1.924s coverage: 43.4% of statements\n")),(0,n.kt)("p",null,"Install the kubectl Redis cluster plugin (more info ",(0,n.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/kubectl-plugin"},"here"),")"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ make plugin\n")),(0,n.kt)("h2",{id:"create-a-kubernetes-cluster"},"Create a Kubernetes cluster"),(0,n.kt)("p",null,"To run the Redis operator, you need to have a running Kubernetes cluster. You can use local k8s cluster frameworks such as ",(0,n.kt)("inlineCode",{parentName:"p"},"kind")," or ",(0,n.kt)("inlineCode",{parentName:"p"},"minikube"),". Use the following guide to install a ",(0,n.kt)("inlineCode",{parentName:"p"},"kind")," cluster similar to what we use in our e2e tests."),(0,n.kt)("p",null,"From the project root directory, create your kind cluster using the e2e test configuration:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kind create cluster --config ./test/e2e/kind_config.yml\n")),(0,n.kt)("p",null,"Build the required docker images:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"make container PREFIX= TAG=latest\n")),(0,n.kt)("p",null,"Once the kind cluster is up and running, load the images into the kind cluster:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kind load docker-image operator-for-redis:latest\n$ kind load docker-image node-for-redis:latest\n$ kind load docker-image metrics-for-redis:latest\n")),(0,n.kt)("h3",{id:"deploy-a-redis-operator"},"Deploy a Redis operator"),(0,n.kt)("p",null,"Install the ",(0,n.kt)("inlineCode",{parentName:"p"},"operator-for-redis")," Helm chart:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ helm install op charts/operator-for-redis --wait --set image.repository=operator-for-redis --set image.tag=latest\nNAME: op\nLAST DEPLOYED: Thu Oct 21 15:11:51 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nTEST SUITE: None\n")),(0,n.kt)("p",null,"Confirm that the operator is running properly:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kubectl get pods\nNAME READY STATUS RESTARTS AGE\nop-operator-for-redis-64dbfb4b59-xjttw 1/1 Running 0 31s\n")),(0,n.kt)("h3",{id:"deploy-a-redis-cluster"},"Deploy a Redis cluster"),(0,n.kt)("p",null,"Install the ",(0,n.kt)("inlineCode",{parentName:"p"},"node-for-redis")," Helm chart:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ helm install --wait cluster charts/node-for-redis --set image.repository=node-for-redis --set image.tag=latest\nNAME: cluster\nLAST DEPLOYED: Thu Oct 21 15:12:05 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nTEST SUITE: None\n")),(0,n.kt)("p",null,"Check the cluster status:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kubectl rc\n POD NAME IP NODE ID ZONE USED MEMORY MAX MEMORY KEYS SLOTS\n + 
rediscluster-cluster-node-for-redis-2h92v 10.244.1.89 172.18.0.3 5606ea9ab09678124a4b17de10ab92a78aac0b4d dal13 35.55M 10.95G 5462-10923\n | rediscluster-cluster-node-for-redis-nf24b 10.244.2.89 172.18.0.2 6840c0e5db16ebf073f57c67a6487c1c7f0d12d1 dal10 2.62M 10.95G\n + rediscluster-cluster-node-for-redis-4h4s2 10.244.2.88 172.18.0.2 8a2f2db39b85cf059e88dc80d7c9cafefac94de0 dal10 34.63M 10.95G 10924-16383\n | rediscluster-cluster-node-for-redis-77bt2 10.244.3.87 172.18.0.4 74582d0e0cedb458e81f1e9d4f32cdc3f5e9399b dal12 2.60M 10.95G\n + rediscluster-cluster-node-for-redis-dh6pt 10.244.3.86 172.18.0.4 81f5c13bec9a0545a62de08b2a309a87d29855c7 dal12 2.83M 10.95G 0-5461\n | rediscluster-cluster-node-for-redis-jnh2h 10.244.1.88 172.18.0.3 ffae381633377414597731597518529255fd9b69 dal13 2.64M 10.95G\n\n NAME NAMESPACE PODS OPS STATUS REDIS STATUS NB PRIMARY REPLICATION ZONE SKEW\n cluster-node-for-redis default 6/6/6 ClusterOK OK 3/3 1-1/1 0/0/BALANCED\n")),(0,n.kt)("h3",{id:"clean-up-your-environment"},"Clean up your environment"),(0,n.kt)("p",null,"Delete the Redis cluster:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},'$ helm uninstall cluster\nrelease "cluster" deleted\n')),(0,n.kt)("p",null,"Delete the Redis operator:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},'$ helm uninstall op\nrelease "op" deleted\n')),(0,n.kt)("p",null,"Ensure all pods have been deleted:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kubectl get pods\nNo resources found in default namespace.\n")),(0,n.kt)("h2",{id:"run-end-to-end-tests"},"Run end-to-end tests"),(0,n.kt)("p",null,"If you followed the steps for creating a ",(0,n.kt)("inlineCode",{parentName:"p"},"kind")," cluster with the e2e test configuration, then running the e2e tests is simple."),(0,n.kt)("p",null,"Build the required docker images:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"make container PREFIX=ibmcom/ TAG=local\nmake container-node PREFIX=ibmcom/ TAG=new\n")),(0,n.kt)("p",null,"Note that we need both ",(0,n.kt)("inlineCode",{parentName:"p"},"local")," and ",(0,n.kt)("inlineCode",{parentName:"p"},"new")," image tags for a rolling update e2e test case."),(0,n.kt)("p",null,"Load the required images into the kind cluster:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ kind load docker-image ibmcom/operator-for-redis:local\n$ kind load docker-image ibmcom/node-for-redis:local\n$ kind load docker-image ibmcom/node-for-redis:new\n")),(0,n.kt)("p",null,"Once the kind cluster is up and running, deploy the ",(0,n.kt)("inlineCode",{parentName:"p"},"operator-for-redis")," Helm chart:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},"$ helm install op charts/operator-for-redis --wait --set image.repository=ibmcom/operator-for-redis --set image.tag=local\nNAME: op\nLAST DEPLOYED: Thu Oct 21 15:11:51 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nTEST SUITE: None\n")),(0,n.kt)("p",null,"When the ",(0,n.kt)("inlineCode",{parentName:"p"},"operator-for-redis")," pod is up and running, you can start the e2e regression:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-console"},'$ go test -timeout 30m ./test/e2e --kubeconfig=$HOME/.kube/config --ginkgo.v --test.v\nRunning Suite: RedisCluster Suite\n=================================\nRandom Seed: 1634845111\nWill run 11 of 11 specs\n\nOct 
21 15:38:31.261: INFO: KubeconfigPath-> "/Users/kscharm/.kube/config"\nOct 21 15:38:31.295: INFO: Check whether RedisCluster resource is registered...\nRedisCluster CRUD operations\n should create a RedisCluster\n \n...\n\nRan 11 of 11 Specs in 517.299 seconds\nSUCCESS! -- 11 Passed | 0 Failed | 0 Pending | 0 Skipped\nPASS\nok github.com/IBM/operator-for-redis-cluster/test/e2e 517.776s\n')))}p.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/d12053bc.2505c139.js b/assets/js/d12053bc.d294519a.js similarity index 99% rename from assets/js/d12053bc.2505c139.js rename to assets/js/d12053bc.d294519a.js index 45b664cf..7cec19a1 100644 --- a/assets/js/d12053bc.2505c139.js +++ b/assets/js/d12053bc.d294519a.js @@ -1 +1 @@ -"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[394],{3905:(e,t,i)=>{i.d(t,{Zo:()=>u,kt:()=>g});var n=i(7294);function a(e,t,i){return t in e?Object.defineProperty(e,t,{value:i,enumerable:!0,configurable:!0,writable:!0}):e[t]=i,e}function o(e,t){var i=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),i.push.apply(i,n)}return i}function r(e){for(var t=1;t=0||(a[i]=e[i]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,i)&&(a[i]=e[i])}return a}var s=n.createContext({}),m=function(e){var t=n.useContext(s),i=t;return e&&(i="function"==typeof e?e(t):r(r({},t),e)),i},u=function(e){var t=m(e.components);return n.createElement(s.Provider,{value:t},e.children)},c="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},p=n.forwardRef((function(e,t){var i=e.components,a=e.mdxType,o=e.originalType,s=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),c=m(i),p=a,g=c["".concat(s,".").concat(p)]||c[p]||d[p]||o;return i?n.createElement(g,r(r({ref:t},u),{},{components:i})):n.createElement(g,r({ref:t},u))}));function g(e,t){var i=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=i.length,r=new Array(o);r[0]=p;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[c]="string"==typeof e?e:a,r[1]=l;for(var m=2;m{i.r(t),i.d(t,{assets:()=>s,contentTitle:()=>r,default:()=>d,frontMatter:()=>o,metadata:()=>l,toc:()=>m});var n=i(7462),a=(i(7294),i(3905));const o={title:"Key Migration",slug:"/key-migration"},r="Key Migration",l={unversionedId:"key-migration",id:"key-migration",title:"Key Migration",description:"Overview",source:"@site/docs/key-migration.md",sourceDirName:".",slug:"/key-migration",permalink:"/operator-for-redis-cluster/key-migration",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/key-migration.md",tags:[],version:"current",lastUpdatedAt:1691078446,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Key Migration",slug:"/key-migration"},sidebar:"docs",previous:{title:"Rolling Update Procedure",permalink:"/operator-for-redis-cluster/rolling-update"},next:{title:"Contributing",permalink:"/operator-for-redis-cluster/contributing"}},s={},m=[{value:"Overview",id:"overview",level:2},{value:"Redis cluster migration configuration",id:"redis-cluster-migration-configuration",level:2},{value:"Default",id:"default",level:3},{value:"Definitions",id:"definitions",level:3},{value:"Rolling update key migration enabled - rollingUpdate.keyMigration: 
true",id:"rolling-update-key-migration-enabled---rollingupdatekeymigration-true",level:3},{value:"Examples",id:"examples",level:4},{value:"Rolling update key migration disabled - rollingUpdate.keyMigration: false",id:"rolling-update-key-migration-disabled---rollingupdatekeymigration-false",level:3},{value:"Examples",id:"examples-1",level:4}],u={toc:m},c="wrapper";function d(e){let{components:t,...i}=e;return(0,a.kt)(c,(0,n.Z)({},u,i,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"key-migration"},"Key Migration"),(0,a.kt)("h2",{id:"overview"},"Overview"),(0,a.kt)("p",null,"Key migration is the process by which Redis migrates keys from a source primary node to a destination primary node. The high-level steps for migrating keys can be found ",(0,a.kt)("a",{parentName:"p",href:"https://redis.io/commands/cluster-setslot#redis-cluster-live-resharding-explained"},"here"),". This feature allows users to better configure how keys are migrated, if at all."),(0,a.kt)("p",null,"Depending on the size of the Redis cluster, the key migration process can be time-consuming. For example, a cluster with thousands of Gigabytes of data can take hours to migrate keys during a scaling or rolling update operation. To speed up the scaling process, we give users the option to migrate slots without keys and provide configuration to control batching."),(0,a.kt)("h2",{id:"redis-cluster-migration-configuration"},"Redis cluster migration configuration"),(0,a.kt)("h3",{id:"default"},"Default"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate:\n keyMigration: true\n keyBatchSize: 10000\n slotBatchSize: 16\n idleTimeoutMillis: 30000\n warmingDelayMillis: 0\n\nscaling:\n keyBatchSize: 10000\n slotBatchSize: 16\n idleTimeoutMillis: 30000\n")),(0,a.kt)("p",null,"If you observe the default configuration above, you will notice that there are two separate sections for configuring key migration during rolling updates and scaling operations. The ",(0,a.kt)("inlineCode",{parentName:"p"},"rollingUpdate")," section determines how keys are migrated during ",(0,a.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/rolling-update"},"rolling updates"),", and the ",(0,a.kt)("inlineCode",{parentName:"p"},"scaling")," section determines how keys are migrated during ",(0,a.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/scaling"},"scaling operations"),". The following definitions apply to both configurations."),(0,a.kt)("h3",{id:"definitions"},"Definitions"),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"keyMigration")," specifies whether to migrate keys during rolling updates. For most use cases, users will want to keep this value set to ",(0,a.kt)("inlineCode",{parentName:"p"},"true"),". However, for users who use Redis cluster as a caching tool instead of a persistent database, you may want to consider setting this to ",(0,a.kt)("inlineCode",{parentName:"p"},"false"),". When set to ",(0,a.kt)("inlineCode",{parentName:"p"},"false"),", this feature will transfer slots from the old Redis primary to the new primary without migrating keys. For large clusters, this can save a significant amount of time with the tradeoff of temporarily increasing the number of requests to the backend. The increase in backend hit rate can be mitigated by modifying ",(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis"),". 
The next sections will discuss the two different configuration options for key migration."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"keyBatchSize")," determines the number of keys to get from a single slot during each migration iteration. By default, this value is ",(0,a.kt)("inlineCode",{parentName:"p"},"10000")," keys."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"slotBatchSize")," specifies the number of slots to migrate on each iteration. By default, this value is ",(0,a.kt)("inlineCode",{parentName:"p"},"16")," slots. For most use cases, set this value to the number of logical CPUs. Usually, this is two times the CPU resource limits in the Redis operator deployment. See ",(0,a.kt)("a",{parentName:"p",href:"https://pkg.go.dev/runtime#NumCPU"},"runtime.CPU")," for more information on how Go checks the number of available CPUs. Also, keep in mind a Redis cluster has ",(0,a.kt)("inlineCode",{parentName:"p"},"16384")," total slots, and those slots are evenly distributed across the primary nodes."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"idleTimeoutMillis")," is the maximum idle time at any point during key migration. This means the migration should make progress without blocking for more than the specified number of milliseconds. See the ",(0,a.kt)("a",{parentName:"p",href:"https://redis.io/commands/migrate"},"Redis migrate command")," for more information about the timeout."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis")," is the amount of time in between each batch of slots. As the name suggests, it allows the new Redis node to warm its cache before moving on to the next node in the rolling update."),(0,a.kt)("h3",{id:"rolling-update-key-migration-enabled---rollingupdatekeymigration-true"},"Rolling update key migration enabled - ",(0,a.kt)("inlineCode",{parentName:"h3"},"rollingUpdate.keyMigration: true")),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"keyBatchSize"),": change this value depending on the total number of keys in your Redis cluster. Increasing this value can reduce the amount of time it takes to migrate keys by moving a larger number of keys per batch of slots."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"slotBatchSize"),": increasing this value higher than the number of logical CPUs will have minimal effect on rolling update performance."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"idleTimeoutMillis"),": do not modify this value unless you receive this specific error."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis"),": it's best to set this value to zero unless you want to introduce a delay between slot migrations. You can still set a non-zero delay if you want to reduce the overall strain on the cluster by the migration calls, and you do not care about how long the migration takes."),(0,a.kt)("h4",{id:"examples"},"Examples"),(0,a.kt)("p",null,"Assume all clusters have allocated 4 physical CPUs and 1 Gb memory for the Redis operator. 
We have 8 logical CPUs."),(0,a.kt)("p",null,"Given a small redis cluster with 3 primaries, RF = 1, and maximum memory of 1 Gb per node - we have the following configuration:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: true # Key migration is enabled\n keyBatchSize: 1000 # Migrate keys in batches of 1,000 per slot \n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n warmingDelayMillis: 0 # No delay between each batch of 2,500 slots\n\nscaling: # For scaling operations,\n keyBatchSize: 1000 # Migrate keys in batches of 1,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")),(0,a.kt)("p",null,"Given a large redis cluster with 20 primaries, RF = 1, and maximum memory of 10 Gb per node:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: true # Key migration is enabled\n keyBatchSize: 10000 # Migrate keys in batches of 10,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n warmingDelayMillis: 0 # No delay between each batch of slots\n\nscaling: # For scaling operations,\n keyBatchSize: 10000 # Migrate keys in batches of 10,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")),(0,a.kt)("p",null,"Observe how ",(0,a.kt)("inlineCode",{parentName:"p"},"keyBatchSize")," is significantly greater than in the previous example because we have ten times the data per node."),(0,a.kt)("p",null,"Here we have a cluster with 10 primaries, RF = 1, and maximum memory of 5Gb per node. We optimize for fast key migration on both rolling updates and scaling operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: true # Key migration is enabled\n keyBatchSize: 50000 # Migrate keys in batches of 50,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n warmingDelayMillis: 0 # No delay between each batch of slots\n\nscaling: # For scaling operations,\n keyBatchSize: 50000 # Migrate keys in batches of 50,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")),(0,a.kt)("h3",{id:"rolling-update-key-migration-disabled---rollingupdatekeymigration-false"},"Rolling update key migration disabled - ",(0,a.kt)("inlineCode",{parentName:"h3"},"rollingUpdate.keyMigration: false")),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"keyBatchSize"),": not used when no keys are migrated."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"slotBatchSize"),": depends on how quickly you want to wipe the cache. 
You can use a smaller ",(0,a.kt)("inlineCode",{parentName:"p"},"slotBatchSize")," and increase ",(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis")," to make this process go slower. If your backend can handle a higher increase in requests, you can set ",(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis")," to something small or even zero."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"idleTimeoutMillis"),": not used in when no keys are migrated."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis"),": increasing this value will ease the strain on your backend as slot ownership transfers during a rolling update by pausing after migrating a single batch of slots. "),(0,a.kt)("p",null,"Please be sure to properly configure ",(0,a.kt)("inlineCode",{parentName:"p"},"rollingUpdate")," based on your Redis cluster if you plan on using this configuration. Setting too small a value for ",(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis")," will quickly wipe all the keys in your cluster without yielding sufficient time for new Redis nodes to warm up."),(0,a.kt)("h4",{id:"examples-1"},"Examples"),(0,a.kt)("p",null,"Given a small redis cluster with 3 primaries, RF = 1, and maximum memory of 1 Gb per node - we have the following configuration:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: false # Key migration is disabled\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n warmingDelayMillis: 1000 # Wait 1 second between each batch of slots\n\nscaling: # For scaling operations,\n keyBatchSize: 10000 # Migrate keys in batches of 10,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")),(0,a.kt)("p",null,"Given a large redis cluster with 20 primaries, RF = 1, and maximum memory of 10 Gb per node:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: false # Key migration is disabled\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n warmingDelayMillis: 10000 # Wait 10 seconds between each batch of slots\n\nscaling: # For scaling operations,\n keyBatchSize: 10000 # Migrate keys in batches of 10,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")),(0,a.kt)("p",null,"Here we have a cluster with 10 primaries, RF = 1, and maximum memory of 5Gb per node. 
We want to wipe the cache as quickly as possible on rolling updates, while also maintaining fast key migration on scaling operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: false # Key migration is disabled\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n warmingDelayMillis: 0 # No delay between each batch of slots\n\nscaling: # For scaling operations,\n keyBatchSize: 50000 # Migrate keys in batches of 50,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[394],{3905:(e,t,i)=>{i.d(t,{Zo:()=>u,kt:()=>g});var n=i(7294);function a(e,t,i){return t in e?Object.defineProperty(e,t,{value:i,enumerable:!0,configurable:!0,writable:!0}):e[t]=i,e}function o(e,t){var i=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),i.push.apply(i,n)}return i}function r(e){for(var t=1;t=0||(a[i]=e[i]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,i)&&(a[i]=e[i])}return a}var s=n.createContext({}),m=function(e){var t=n.useContext(s),i=t;return e&&(i="function"==typeof e?e(t):r(r({},t),e)),i},u=function(e){var t=m(e.components);return n.createElement(s.Provider,{value:t},e.children)},c="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},p=n.forwardRef((function(e,t){var i=e.components,a=e.mdxType,o=e.originalType,s=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),c=m(i),p=a,g=c["".concat(s,".").concat(p)]||c[p]||d[p]||o;return i?n.createElement(g,r(r({ref:t},u),{},{components:i})):n.createElement(g,r({ref:t},u))}));function g(e,t){var i=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=i.length,r=new Array(o);r[0]=p;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[c]="string"==typeof e?e:a,r[1]=l;for(var m=2;m{i.r(t),i.d(t,{assets:()=>s,contentTitle:()=>r,default:()=>d,frontMatter:()=>o,metadata:()=>l,toc:()=>m});var n=i(7462),a=(i(7294),i(3905));const o={title:"Key Migration",slug:"/key-migration"},r="Key Migration",l={unversionedId:"key-migration",id:"key-migration",title:"Key Migration",description:"Overview",source:"@site/docs/key-migration.md",sourceDirName:".",slug:"/key-migration",permalink:"/operator-for-redis-cluster/key-migration",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/key-migration.md",tags:[],version:"current",lastUpdatedAt:1691083274,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Key Migration",slug:"/key-migration"},sidebar:"docs",previous:{title:"Rolling Update Procedure",permalink:"/operator-for-redis-cluster/rolling-update"},next:{title:"Contributing",permalink:"/operator-for-redis-cluster/contributing"}},s={},m=[{value:"Overview",id:"overview",level:2},{value:"Redis cluster migration configuration",id:"redis-cluster-migration-configuration",level:2},{value:"Default",id:"default",level:3},{value:"Definitions",id:"definitions",level:3},{value:"Rolling update key migration enabled - rollingUpdate.keyMigration: 
true",id:"rolling-update-key-migration-enabled---rollingupdatekeymigration-true",level:3},{value:"Examples",id:"examples",level:4},{value:"Rolling update key migration disabled - rollingUpdate.keyMigration: false",id:"rolling-update-key-migration-disabled---rollingupdatekeymigration-false",level:3},{value:"Examples",id:"examples-1",level:4}],u={toc:m},c="wrapper";function d(e){let{components:t,...i}=e;return(0,a.kt)(c,(0,n.Z)({},u,i,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"key-migration"},"Key Migration"),(0,a.kt)("h2",{id:"overview"},"Overview"),(0,a.kt)("p",null,"Key migration is the process by which Redis migrates keys from a source primary node to a destination primary node. The high-level steps for migrating keys can be found ",(0,a.kt)("a",{parentName:"p",href:"https://redis.io/commands/cluster-setslot#redis-cluster-live-resharding-explained"},"here"),". This feature allows users to better configure how keys are migrated, if at all."),(0,a.kt)("p",null,"Depending on the size of the Redis cluster, the key migration process can be time-consuming. For example, a cluster with thousands of Gigabytes of data can take hours to migrate keys during a scaling or rolling update operation. To speed up the scaling process, we give users the option to migrate slots without keys and provide configuration to control batching."),(0,a.kt)("h2",{id:"redis-cluster-migration-configuration"},"Redis cluster migration configuration"),(0,a.kt)("h3",{id:"default"},"Default"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate:\n keyMigration: true\n keyBatchSize: 10000\n slotBatchSize: 16\n idleTimeoutMillis: 30000\n warmingDelayMillis: 0\n\nscaling:\n keyBatchSize: 10000\n slotBatchSize: 16\n idleTimeoutMillis: 30000\n")),(0,a.kt)("p",null,"If you observe the default configuration above, you will notice that there are two separate sections for configuring key migration during rolling updates and scaling operations. The ",(0,a.kt)("inlineCode",{parentName:"p"},"rollingUpdate")," section determines how keys are migrated during ",(0,a.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/rolling-update"},"rolling updates"),", and the ",(0,a.kt)("inlineCode",{parentName:"p"},"scaling")," section determines how keys are migrated during ",(0,a.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/scaling"},"scaling operations"),". The following definitions apply to both configurations."),(0,a.kt)("h3",{id:"definitions"},"Definitions"),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"keyMigration")," specifies whether to migrate keys during rolling updates. For most use cases, users will want to keep this value set to ",(0,a.kt)("inlineCode",{parentName:"p"},"true"),". However, for users who use Redis cluster as a caching tool instead of a persistent database, you may want to consider setting this to ",(0,a.kt)("inlineCode",{parentName:"p"},"false"),". When set to ",(0,a.kt)("inlineCode",{parentName:"p"},"false"),", this feature will transfer slots from the old Redis primary to the new primary without migrating keys. For large clusters, this can save a significant amount of time with the tradeoff of temporarily increasing the number of requests to the backend. The increase in backend hit rate can be mitigated by modifying ",(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis"),". 
The next sections will discuss the two different configuration options for key migration."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"keyBatchSize")," determines the number of keys to get from a single slot during each migration iteration. By default, this value is ",(0,a.kt)("inlineCode",{parentName:"p"},"10000")," keys."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"slotBatchSize")," specifies the number of slots to migrate on each iteration. By default, this value is ",(0,a.kt)("inlineCode",{parentName:"p"},"16")," slots. For most use cases, set this value to the number of logical CPUs. Usually, this is two times the CPU resource limits in the Redis operator deployment. See ",(0,a.kt)("a",{parentName:"p",href:"https://pkg.go.dev/runtime#NumCPU"},"runtime.CPU")," for more information on how Go checks the number of available CPUs. Also, keep in mind a Redis cluster has ",(0,a.kt)("inlineCode",{parentName:"p"},"16384")," total slots, and those slots are evenly distributed across the primary nodes."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"idleTimeoutMillis")," is the maximum idle time at any point during key migration. This means the migration should make progress without blocking for more than the specified number of milliseconds. See the ",(0,a.kt)("a",{parentName:"p",href:"https://redis.io/commands/migrate"},"Redis migrate command")," for more information about the timeout."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis")," is the amount of time in between each batch of slots. As the name suggests, it allows the new Redis node to warm its cache before moving on to the next node in the rolling update."),(0,a.kt)("h3",{id:"rolling-update-key-migration-enabled---rollingupdatekeymigration-true"},"Rolling update key migration enabled - ",(0,a.kt)("inlineCode",{parentName:"h3"},"rollingUpdate.keyMigration: true")),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"keyBatchSize"),": change this value depending on the total number of keys in your Redis cluster. Increasing this value can reduce the amount of time it takes to migrate keys by moving a larger number of keys per batch of slots."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"slotBatchSize"),": increasing this value higher than the number of logical CPUs will have minimal effect on rolling update performance."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"idleTimeoutMillis"),": do not modify this value unless you receive this specific error."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis"),": it's best to set this value to zero unless you want to introduce a delay between slot migrations. You can still set a non-zero delay if you want to reduce the overall strain on the cluster by the migration calls, and you do not care about how long the migration takes."),(0,a.kt)("h4",{id:"examples"},"Examples"),(0,a.kt)("p",null,"Assume all clusters have allocated 4 physical CPUs and 1 Gb memory for the Redis operator. 
We have 8 logical CPUs."),(0,a.kt)("p",null,"Given a small redis cluster with 3 primaries, RF = 1, and maximum memory of 1 Gb per node - we have the following configuration:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: true # Key migration is enabled\n keyBatchSize: 1000 # Migrate keys in batches of 1,000 per slot \n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n warmingDelayMillis: 0 # No delay between each batch of 2,500 slots\n\nscaling: # For scaling operations,\n keyBatchSize: 1000 # Migrate keys in batches of 1,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")),(0,a.kt)("p",null,"Given a large redis cluster with 20 primaries, RF = 1, and maximum memory of 10 Gb per node:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: true # Key migration is enabled\n keyBatchSize: 10000 # Migrate keys in batches of 10,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n warmingDelayMillis: 0 # No delay between each batch of slots\n\nscaling: # For scaling operations,\n keyBatchSize: 10000 # Migrate keys in batches of 10,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")),(0,a.kt)("p",null,"Observe how ",(0,a.kt)("inlineCode",{parentName:"p"},"keyBatchSize")," is significantly greater than in the previous example because we have ten times the data per node."),(0,a.kt)("p",null,"Here we have a cluster with 10 primaries, RF = 1, and maximum memory of 5Gb per node. We optimize for fast key migration on both rolling updates and scaling operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: true # Key migration is enabled\n keyBatchSize: 50000 # Migrate keys in batches of 50,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n warmingDelayMillis: 0 # No delay between each batch of slots\n\nscaling: # For scaling operations,\n keyBatchSize: 50000 # Migrate keys in batches of 50,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")),(0,a.kt)("h3",{id:"rolling-update-key-migration-disabled---rollingupdatekeymigration-false"},"Rolling update key migration disabled - ",(0,a.kt)("inlineCode",{parentName:"h3"},"rollingUpdate.keyMigration: false")),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"keyBatchSize"),": not used when no keys are migrated."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"slotBatchSize"),": depends on how quickly you want to wipe the cache. 
You can use a smaller ",(0,a.kt)("inlineCode",{parentName:"p"},"slotBatchSize")," and increase ",(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis")," to make this process go slower. If your backend can handle a higher increase in requests, you can set ",(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis")," to something small or even zero."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"idleTimeoutMillis"),": not used in when no keys are migrated."),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis"),": increasing this value will ease the strain on your backend as slot ownership transfers during a rolling update by pausing after migrating a single batch of slots. "),(0,a.kt)("p",null,"Please be sure to properly configure ",(0,a.kt)("inlineCode",{parentName:"p"},"rollingUpdate")," based on your Redis cluster if you plan on using this configuration. Setting too small a value for ",(0,a.kt)("inlineCode",{parentName:"p"},"warmingDelayMillis")," will quickly wipe all the keys in your cluster without yielding sufficient time for new Redis nodes to warm up."),(0,a.kt)("h4",{id:"examples-1"},"Examples"),(0,a.kt)("p",null,"Given a small redis cluster with 3 primaries, RF = 1, and maximum memory of 1 Gb per node - we have the following configuration:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: false # Key migration is disabled\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n warmingDelayMillis: 1000 # Wait 1 second between each batch of slots\n\nscaling: # For scaling operations,\n keyBatchSize: 10000 # Migrate keys in batches of 10,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")),(0,a.kt)("p",null,"Given a large redis cluster with 20 primaries, RF = 1, and maximum memory of 10 Gb per node:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: false # Key migration is disabled\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n warmingDelayMillis: 10000 # Wait 10 seconds between each batch of slots\n\nscaling: # For scaling operations,\n keyBatchSize: 10000 # Migrate keys in batches of 10,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")),(0,a.kt)("p",null,"Here we have a cluster with 10 primaries, RF = 1, and maximum memory of 5Gb per node. 
We want to wipe the cache as quickly as possible on rolling updates, while also maintaining fast key migration on scaling operations:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},"rollingUpdate: # For rolling updates,\n keyMigration: false # Key migration is disabled\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n warmingDelayMillis: 0 # No delay between each batch of slots\n\nscaling: # For scaling operations,\n keyBatchSize: 50000 # Migrate keys in batches of 50,000 per slot\n slotBatchSize: 8 # Transfer 8 slots on each migration iteration\n idleTimeoutMillis: 30000 # Wait up to 30 seconds for any delay in communication during the migration\n")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/fd0c4088.ff5067dd.js b/assets/js/fd0c4088.b901191e.js similarity index 99% rename from assets/js/fd0c4088.ff5067dd.js rename to assets/js/fd0c4088.b901191e.js index 4379fddd..5c8f26c8 100644 --- a/assets/js/fd0c4088.ff5067dd.js +++ b/assets/js/fd0c4088.b901191e.js @@ -1 +1 @@ -"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[919],{3905:(e,t,r)=>{r.d(t,{Zo:()=>u,kt:()=>f});var a=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function l(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,a)}return r}function d(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var s=a.createContext({}),o=function(e){var t=a.useContext(s),r=t;return e&&(r="function"==typeof e?e(t):d(d({},t),e)),r},u=function(e){var t=o(e.components);return a.createElement(s.Provider,{value:t},e.children)},c="mdxType",p={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},b=a.forwardRef((function(e,t){var r=e.components,n=e.mdxType,l=e.originalType,s=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),c=o(r),b=n,f=c["".concat(s,".").concat(b)]||c[b]||p[b]||l;return r?a.createElement(f,d(d({ref:t},u),{},{components:r})):a.createElement(f,d({ref:t},u))}));function f(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var l=r.length,d=new Array(l);d[0]=b;var i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i[c]="string"==typeof e?e:n,d[1]=i;for(var o=2;o{r.r(t),r.d(t,{assets:()=>s,contentTitle:()=>d,default:()=>p,frontMatter:()=>l,metadata:()=>i,toc:()=>o});var a=r(7462),n=(r(7294),r(3905));const l={title:"Kubectl Plugin",slug:"/kubectl-plugin"},d="Kubectl Plugin",i={unversionedId:"kubectl-plugin",id:"kubectl-plugin",title:"Kubectl Plugin",description:"The Redis Operator kubectl plugin helps you visualise the status of your Redis cluster. 
Please visit the official documentation for more details.",source:"@site/docs/kubectl-plugin.md",sourceDirName:".",slug:"/kubectl-plugin",permalink:"/operator-for-redis-cluster/kubectl-plugin",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/kubectl-plugin.md",tags:[],version:"current",lastUpdatedAt:1691078446,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Kubectl Plugin",slug:"/kubectl-plugin"},sidebar:"docs",previous:{title:"Cookbook",permalink:"/operator-for-redis-cluster/cookbook"},next:{title:"Redis Server Configuration",permalink:"/operator-for-redis-cluster/configuration"}},s={},o=[{value:"Installation",id:"installation",level:2},{value:"Usage",id:"usage",level:2},{value:"Redis Cluster Status",id:"redis-cluster-status",level:2},{value:"Redis Cluster Status Fields",id:"redis-cluster-status-fields",level:3},{value:"Redis Cluster Pod Role Prefix Legend",id:"redis-cluster-pod-role-prefix-legend",level:3},{value:"Redis Cluster State",id:"redis-cluster-state",level:2},{value:"Redis Cluster State Fields",id:"redis-cluster-state-fields",level:3}],u={toc:o},c="wrapper";function p(e){let{components:t,...r}=e;return(0,n.kt)(c,(0,a.Z)({},u,r,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("h1",{id:"kubectl-plugin"},"Kubectl Plugin"),(0,n.kt)("p",null,"The Redis Operator kubectl plugin helps you visualise the status of your Redis cluster. Please visit the ",(0,n.kt)("a",{parentName:"p",href:"https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/"},"official documentation")," for more details."),(0,n.kt)("h2",{id:"installation"},"Installation"),(0,n.kt)("p",null,"By default, the plugin will install in ",(0,n.kt)("inlineCode",{parentName:"p"},"~/.kube/plugins"),"."),(0,n.kt)("p",null,"Run ",(0,n.kt)("inlineCode",{parentName:"p"},"make plugin")," to install the plugin. After installation is complete, add the plugin to your PATH so ",(0,n.kt)("a",{parentName:"p",href:"https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/#installing-kubectl-plugins"},(0,n.kt)("inlineCode",{parentName:"a"},"kubectl"))," can find it. 
By default, the plugin is installed to ",(0,n.kt)("inlineCode",{parentName:"p"},"$HOME/.kube/plugins/rediscluster"),"."),(0,n.kt)("p",null,"Alternatively, you can download the plugin manually from the assets tab on the ",(0,n.kt)("a",{parentName:"p",href:"https://github.com/IBM/operator-for-redis-cluster/releases"},"releases page")),(0,n.kt)("h2",{id:"usage"},"Usage"),(0,n.kt)("p",null,"Example usage:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-text"},"kubectl rc\n POD NAME IP NODE ID ZONE USED MEMORY MAX MEMORY KEYS SLOTS\n + rediscluster-rc1-node-for-redis-7jl8q 172.30.255.112 10.183.176.60 5478771ba4c34dbad9df8d30ac4bec5c9ba0842e wdc04 1023.75M 1.00G db0=669808 2731-5461\n | rediscluster-rc1-node-for-redis-7q9jn 172.30.68.235 10.191.41.164 15c388f164ad0946691482c7de72939848ca86e2 wdc07 1023.76M 1.00G db0=669808\n | rediscluster-rc1-node-for-redis-wjkk4 172.30.255.169 10.188.125.58 442de26f8cc9a011df307932a683177a5cd034a9 wdc06 1023.77M 1.00G db0=669808\n + rediscluster-rc1-node-for-redis-bmrgm 172.30.217.164 10.183.176.51 dd59697e82edf1554468f239f63ea1efd6718d4b wdc04 1023.78M 1.00G db0=669675 5462-8192\n | rediscluster-rc1-node-for-redis-7lw4l 172.30.61.98 10.188.125.33 fc5718a7e9bd963f80b9cbd786bbc02d80b7f191 wdc06 1023.78M 1.00G db0=669675\n | rediscluster-rc1-node-for-redis-qtzx8 172.30.188.104 10.191.41.140 8cd31d550d6935868d5da12ab78ddfb8c6fea1a2 wdc07 1023.78M 1.00G db0=669675\n + rediscluster-rc1-node-for-redis-dbrmg 172.30.140.228 10.183.176.53 c56420993afd35596425ae9e10a7e902cad5b6f8 wdc04 1023.78M 1.00G db0=670310 13655-16383\n | rediscluster-rc1-node-for-redis-f4mxd 172.30.68.236 10.191.41.164 b3356e03f83227832662f1bc3bae50273e99059b wdc07 1023.82M 1.00G db0=670310\n | rediscluster-rc1-node-for-redis-hhzdx 172.30.255.170 10.188.125.58 cc7bb986e42dd2fe9ead405e39a1a559b1b86e71 wdc06 1023.77M 1.00G db0=670310\n + rediscluster-rc1-node-for-redis-srxrs 172.30.188.105 10.191.41.140 31e880eef26377e719b28894a8fa469939b05a98 wdc07 1023.79M 1.00G db0=669522 10924-13654\n | rediscluster-rc1-node-for-redis-hg6q4 172.30.255.113 10.183.176.60 bd9cd001e5bdcba527277e39f63908ea18504f75 wdc04 1023.80M 1.00G db0=669522\n | rediscluster-rc1-node-for-redis-zgmqk 172.30.16.231 10.188.125.3 caaac384a8cd2cb6181925c889557bdb028aec0e wdc06 1023.81M 1.00G db0=669522\n + rediscluster-rc1-node-for-redis-szb9x 172.30.61.97 10.188.125.33 07de0c67262e816f7792f0a68c4c4a5b47291f46 wdc06 1023.80M 1.00G db0=669743 0-2730\n | rediscluster-rc1-node-for-redis-gvkb4 172.30.217.165 10.183.176.51 f1ff1e5c0a77cb7b5850d8808f950c1303f96056 wdc04 1023.76M 1.00G db0=669743\n | rediscluster-rc1-node-for-redis-znn4x 172.30.67.94 10.191.41.163 e0ab667b6629a660b01c20574750e3a487c69a1b wdc07 1023.78M 1.00G db0=669743\n + rediscluster-rc1-node-for-redis-xf8mr 172.30.67.95 10.191.41.163 8e9fe3e022f63f5fcc2a117edaadd93e39bfbb27 wdc07 1023.78M 1.00G db0=669711 8193-10923\n | rediscluster-rc1-node-for-redis-r2qd2 172.30.16.232 10.188.125.3 0da8f5799b86c40f7ea0c3ebf71ce3955afa1dd1 wdc06 1023.79M 1.00G db0=669711\n | rediscluster-rc1-node-for-redis-zfsxt 172.30.140.229 10.183.176.53 b4c22806c4c2529713e6e05b0d6fd994f0e801c7 wdc04 1023.78M 1.00G db0=669711\n\n POD NAME IP NODE ID ZONE USED MEMORY MAX MEMORY KEYS SLOTS\n + rediscluster-rc2-node-for-redis-57g56 172.30.68.237 10.191.41.164 cc82878c2bdd6381d569709334db86ef6cfade19 wdc07 175.67M 1.00G db0=113397 5462-10923\n + rediscluster-rc2-node-for-redis-942rt 172.30.140.230 10.183.176.53 291f2f06d13a1d821219bc9acfbd5323be1552d9 wdc04 174.82M 1.00G 
db0=112681 10924-16383\n + rediscluster-rc2-node-for-redis-gqmhs 172.30.16.233 10.188.125.3 85f3c1c6652b5fb76b40c8bdffe3a9af57f22d4d wdc06 175.34M 1.00G db0=113014 0-5461\n\n NAME NAMESPACE PODS OPS STATUS REDIS STATUS NB PRIMARY REPLICATION\n rc1-node-for-redis default 18/18/18 ClusterOK OK 6/6 2-2/2\n rc2-node-for-redis default 3/3/3 ClusterOK OK 3/3 0-0/0\n")),(0,n.kt)("p",null,"The example above illustrates the Redis cluster plugin's output when there are two clusters being managed by one Redis operator. The top portion of the output is referred to as the Redis cluster's status and the bottom portion refers to the Redis cluster's state."),(0,n.kt)("h2",{id:"redis-cluster-status"},"Redis Cluster Status"),(0,n.kt)("p",null,"The Redis cluster status provides an easy way to see how your cluster is set up and which replicas belong to which primary. Without the cluster state, it's really difficult to determine in which ",(0,n.kt)("inlineCode",{parentName:"p"},"ZONE")," a Redis pod resides. The ",(0,n.kt)("inlineCode",{parentName:"p"},"POD NAME"),", ",(0,n.kt)("inlineCode",{parentName:"p"},"IP"),", and ",(0,n.kt)("inlineCode",{parentName:"p"},"ID")," fields help when parsing logs because these properties are used differently depending on what part of the code is logging messages. The ",(0,n.kt)("inlineCode",{parentName:"p"},"USED MEMORY")," and ",(0,n.kt)("inlineCode",{parentName:"p"},"MAX MEMORY")," fields help determine if you should be seeing evictions, while the ",(0,n.kt)("inlineCode",{parentName:"p"},"KEYS")," and ",(0,n.kt)("inlineCode",{parentName:"p"},"SLOTS")," fields can help determine if you have any hot spots in your cluster."),(0,n.kt)("h3",{id:"redis-cluster-status-fields"},"Redis Cluster Status Fields"),(0,n.kt)("table",null,(0,n.kt)("thead",{parentName:"table"},(0,n.kt)("tr",{parentName:"thead"},(0,n.kt)("th",{parentName:"tr",align:null},"Field"),(0,n.kt)("th",{parentName:"tr",align:null},"Description"))),(0,n.kt)("tbody",{parentName:"table"},(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"POD NAME"),(0,n.kt)("td",{parentName:"tr",align:null},"Name of Redis pod where the first character indicates its role. 
See the ",(0,n.kt)("a",{parentName:"td",href:"#redis-cluster-pod-role-prefix-legend"},"legend")," for more details")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"IP"),(0,n.kt)("td",{parentName:"tr",align:null},"Internal IP address of the Redis pod")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"NODE"),(0,n.kt)("td",{parentName:"tr",align:null},"IP address of the worker node on which the pod has been scheduled")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"ID"),(0,n.kt)("td",{parentName:"tr",align:null},"Redis ID of the pod")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"ZONE"),(0,n.kt)("td",{parentName:"tr",align:null},"Zone of the worker node on which the pod has been scheduled")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"USED MEMORY"),(0,n.kt)("td",{parentName:"tr",align:null},"Human-readable representation of the total number of bytes allocated by Redis using its allocator")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"MAX MEMORY"),(0,n.kt)("td",{parentName:"tr",align:null},"Human-readable representation of Redis' ",(0,n.kt)("inlineCode",{parentName:"td"},"maxmemory")," configuration directive")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"KEYS"),(0,n.kt)("td",{parentName:"tr",align:null},"List of the number of keys in each database using the form ",(0,n.kt)("inlineCode",{parentName:"td"},"db0:db0_key_count"),",",(0,n.kt)("inlineCode",{parentName:"td"},"db1:db1_key_count"))),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"SLOTS"),(0,n.kt)("td",{parentName:"tr",align:null},"List of the slot ranges owned by this primary")))),(0,n.kt)("h3",{id:"redis-cluster-pod-role-prefix-legend"},"Redis Cluster Pod Role Prefix Legend"),(0,n.kt)("table",null,(0,n.kt)("thead",{parentName:"table"},(0,n.kt)("tr",{parentName:"thead"},(0,n.kt)("th",{parentName:"tr",align:"center"},"Prefix"),(0,n.kt)("th",{parentName:"tr",align:null},"Description"))),(0,n.kt)("tbody",{parentName:"table"},(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:"center"},"+"),(0,n.kt)("td",{parentName:"tr",align:null},"Primary")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:"center"},"|"),(0,n.kt)("td",{parentName:"tr",align:null},"Replica to the primary above")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:"center"},"?"),(0,n.kt)("td",{parentName:"tr",align:null},"Pod that matches the label selector but is not a part of the cluster yet")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:"center"},"^"),(0,n.kt)("td",{parentName:"tr",align:null},"Pod that is currently joining the cluster but has yet to be assigned a role")))),(0,n.kt)("h2",{id:"redis-cluster-state"},"Redis Cluster State"),(0,n.kt)("p",null,"The Redis cluster state provides additional information that is useful when trying to determine the cluster's health. 
It also indicates if a scaling, rebalancing, or rolling update operation is occurring."),(0,n.kt)("h3",{id:"redis-cluster-state-fields"},"Redis Cluster State Fields"),(0,n.kt)("table",null,(0,n.kt)("thead",{parentName:"table"},(0,n.kt)("tr",{parentName:"thead"},(0,n.kt)("th",{parentName:"tr",align:null},"Field"),(0,n.kt)("th",{parentName:"tr",align:null},"Description"))),(0,n.kt)("tbody",{parentName:"table"},(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"NAME"),(0,n.kt)("td",{parentName:"tr",align:null},"Redis cluster name")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"NAMESPACE"),(0,n.kt)("td",{parentName:"tr",align:null},"Namespace of Redis cluster deployment")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"OPS STATUS"),(0,n.kt)("td",{parentName:"tr",align:null},"ClusterOK ","|"," Scaling ","|"," Rebalancing ","|"," RollingUpdate")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"REDIS STATUS"),(0,n.kt)("td",{parentName:"tr",align:null},"OK ","|"," KO ","|"," Scaling ","|"," Calculating Rebalancing ","|"," Rebalancing ","|"," RollingUpdate")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"PODS"),(0,n.kt)("td",{parentName:"tr",align:null},"Current number of ready pods / current number of total pods / desired number of pods")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"NB PRIMARY"),(0,n.kt)("td",{parentName:"tr",align:null},"Current number of primaries / desired number of primaries")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"REPLICATION"),(0,n.kt)("td",{parentName:"tr",align:null},"Current min RF - current max RF / desired RF")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"ZONE SKEW"),(0,n.kt)("td",{parentName:"tr",align:null},"Primary node zone skew / replica node zone skew / BALANCED ","|"," UNBALANCED")))))}p.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[919],{3905:(e,t,r)=>{r.d(t,{Zo:()=>u,kt:()=>f});var a=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function l(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,a)}return r}function d(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var s=a.createContext({}),o=function(e){var t=a.useContext(s),r=t;return e&&(r="function"==typeof e?e(t):d(d({},t),e)),r},u=function(e){var t=o(e.components);return a.createElement(s.Provider,{value:t},e.children)},c="mdxType",p={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},b=a.forwardRef((function(e,t){var r=e.components,n=e.mdxType,l=e.originalType,s=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),c=o(r),b=n,f=c["".concat(s,".").concat(b)]||c[b]||p[b]||l;return r?a.createElement(f,d(d({ref:t},u),{},{components:r})):a.createElement(f,d({ref:t},u))}));function f(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var l=r.length,d=new Array(l);d[0]=b;var 
i={};for(var s in t)hasOwnProperty.call(t,s)&&(i[s]=t[s]);i.originalType=e,i[c]="string"==typeof e?e:n,d[1]=i;for(var o=2;o{r.r(t),r.d(t,{assets:()=>s,contentTitle:()=>d,default:()=>p,frontMatter:()=>l,metadata:()=>i,toc:()=>o});var a=r(7462),n=(r(7294),r(3905));const l={title:"Kubectl Plugin",slug:"/kubectl-plugin"},d="Kubectl Plugin",i={unversionedId:"kubectl-plugin",id:"kubectl-plugin",title:"Kubectl Plugin",description:"The Redis Operator kubectl plugin helps you visualise the status of your Redis cluster. Please visit the official documentation for more details.",source:"@site/docs/kubectl-plugin.md",sourceDirName:".",slug:"/kubectl-plugin",permalink:"/operator-for-redis-cluster/kubectl-plugin",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/kubectl-plugin.md",tags:[],version:"current",lastUpdatedAt:1691083274,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Kubectl Plugin",slug:"/kubectl-plugin"},sidebar:"docs",previous:{title:"Cookbook",permalink:"/operator-for-redis-cluster/cookbook"},next:{title:"Redis Server Configuration",permalink:"/operator-for-redis-cluster/configuration"}},s={},o=[{value:"Installation",id:"installation",level:2},{value:"Usage",id:"usage",level:2},{value:"Redis Cluster Status",id:"redis-cluster-status",level:2},{value:"Redis Cluster Status Fields",id:"redis-cluster-status-fields",level:3},{value:"Redis Cluster Pod Role Prefix Legend",id:"redis-cluster-pod-role-prefix-legend",level:3},{value:"Redis Cluster State",id:"redis-cluster-state",level:2},{value:"Redis Cluster State Fields",id:"redis-cluster-state-fields",level:3}],u={toc:o},c="wrapper";function p(e){let{components:t,...r}=e;return(0,n.kt)(c,(0,a.Z)({},u,r,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("h1",{id:"kubectl-plugin"},"Kubectl Plugin"),(0,n.kt)("p",null,"The Redis Operator kubectl plugin helps you visualise the status of your Redis cluster. Please visit the ",(0,n.kt)("a",{parentName:"p",href:"https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/"},"official documentation")," for more details."),(0,n.kt)("h2",{id:"installation"},"Installation"),(0,n.kt)("p",null,"By default, the plugin will install in ",(0,n.kt)("inlineCode",{parentName:"p"},"~/.kube/plugins"),"."),(0,n.kt)("p",null,"Run ",(0,n.kt)("inlineCode",{parentName:"p"},"make plugin")," to install the plugin. After installation is complete, add the plugin to your PATH so ",(0,n.kt)("a",{parentName:"p",href:"https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/#installing-kubectl-plugins"},(0,n.kt)("inlineCode",{parentName:"a"},"kubectl"))," can find it. 
By default, the plugin is installed to ",(0,n.kt)("inlineCode",{parentName:"p"},"$HOME/.kube/plugins/rediscluster"),"."),(0,n.kt)("p",null,"Alternatively, you can download the plugin manually from the assets tab on the ",(0,n.kt)("a",{parentName:"p",href:"https://github.com/IBM/operator-for-redis-cluster/releases"},"releases page")),(0,n.kt)("h2",{id:"usage"},"Usage"),(0,n.kt)("p",null,"Example usage:"),(0,n.kt)("pre",null,(0,n.kt)("code",{parentName:"pre",className:"language-text"},"kubectl rc\n POD NAME IP NODE ID ZONE USED MEMORY MAX MEMORY KEYS SLOTS\n + rediscluster-rc1-node-for-redis-7jl8q 172.30.255.112 10.183.176.60 5478771ba4c34dbad9df8d30ac4bec5c9ba0842e wdc04 1023.75M 1.00G db0=669808 2731-5461\n | rediscluster-rc1-node-for-redis-7q9jn 172.30.68.235 10.191.41.164 15c388f164ad0946691482c7de72939848ca86e2 wdc07 1023.76M 1.00G db0=669808\n | rediscluster-rc1-node-for-redis-wjkk4 172.30.255.169 10.188.125.58 442de26f8cc9a011df307932a683177a5cd034a9 wdc06 1023.77M 1.00G db0=669808\n + rediscluster-rc1-node-for-redis-bmrgm 172.30.217.164 10.183.176.51 dd59697e82edf1554468f239f63ea1efd6718d4b wdc04 1023.78M 1.00G db0=669675 5462-8192\n | rediscluster-rc1-node-for-redis-7lw4l 172.30.61.98 10.188.125.33 fc5718a7e9bd963f80b9cbd786bbc02d80b7f191 wdc06 1023.78M 1.00G db0=669675\n | rediscluster-rc1-node-for-redis-qtzx8 172.30.188.104 10.191.41.140 8cd31d550d6935868d5da12ab78ddfb8c6fea1a2 wdc07 1023.78M 1.00G db0=669675\n + rediscluster-rc1-node-for-redis-dbrmg 172.30.140.228 10.183.176.53 c56420993afd35596425ae9e10a7e902cad5b6f8 wdc04 1023.78M 1.00G db0=670310 13655-16383\n | rediscluster-rc1-node-for-redis-f4mxd 172.30.68.236 10.191.41.164 b3356e03f83227832662f1bc3bae50273e99059b wdc07 1023.82M 1.00G db0=670310\n | rediscluster-rc1-node-for-redis-hhzdx 172.30.255.170 10.188.125.58 cc7bb986e42dd2fe9ead405e39a1a559b1b86e71 wdc06 1023.77M 1.00G db0=670310\n + rediscluster-rc1-node-for-redis-srxrs 172.30.188.105 10.191.41.140 31e880eef26377e719b28894a8fa469939b05a98 wdc07 1023.79M 1.00G db0=669522 10924-13654\n | rediscluster-rc1-node-for-redis-hg6q4 172.30.255.113 10.183.176.60 bd9cd001e5bdcba527277e39f63908ea18504f75 wdc04 1023.80M 1.00G db0=669522\n | rediscluster-rc1-node-for-redis-zgmqk 172.30.16.231 10.188.125.3 caaac384a8cd2cb6181925c889557bdb028aec0e wdc06 1023.81M 1.00G db0=669522\n + rediscluster-rc1-node-for-redis-szb9x 172.30.61.97 10.188.125.33 07de0c67262e816f7792f0a68c4c4a5b47291f46 wdc06 1023.80M 1.00G db0=669743 0-2730\n | rediscluster-rc1-node-for-redis-gvkb4 172.30.217.165 10.183.176.51 f1ff1e5c0a77cb7b5850d8808f950c1303f96056 wdc04 1023.76M 1.00G db0=669743\n | rediscluster-rc1-node-for-redis-znn4x 172.30.67.94 10.191.41.163 e0ab667b6629a660b01c20574750e3a487c69a1b wdc07 1023.78M 1.00G db0=669743\n + rediscluster-rc1-node-for-redis-xf8mr 172.30.67.95 10.191.41.163 8e9fe3e022f63f5fcc2a117edaadd93e39bfbb27 wdc07 1023.78M 1.00G db0=669711 8193-10923\n | rediscluster-rc1-node-for-redis-r2qd2 172.30.16.232 10.188.125.3 0da8f5799b86c40f7ea0c3ebf71ce3955afa1dd1 wdc06 1023.79M 1.00G db0=669711\n | rediscluster-rc1-node-for-redis-zfsxt 172.30.140.229 10.183.176.53 b4c22806c4c2529713e6e05b0d6fd994f0e801c7 wdc04 1023.78M 1.00G db0=669711\n\n POD NAME IP NODE ID ZONE USED MEMORY MAX MEMORY KEYS SLOTS\n + rediscluster-rc2-node-for-redis-57g56 172.30.68.237 10.191.41.164 cc82878c2bdd6381d569709334db86ef6cfade19 wdc07 175.67M 1.00G db0=113397 5462-10923\n + rediscluster-rc2-node-for-redis-942rt 172.30.140.230 10.183.176.53 291f2f06d13a1d821219bc9acfbd5323be1552d9 wdc04 174.82M 1.00G 
db0=112681 10924-16383\n + rediscluster-rc2-node-for-redis-gqmhs 172.30.16.233 10.188.125.3 85f3c1c6652b5fb76b40c8bdffe3a9af57f22d4d wdc06 175.34M 1.00G db0=113014 0-5461\n\n NAME NAMESPACE PODS OPS STATUS REDIS STATUS NB PRIMARY REPLICATION\n rc1-node-for-redis default 18/18/18 ClusterOK OK 6/6 2-2/2\n rc2-node-for-redis default 3/3/3 ClusterOK OK 3/3 0-0/0\n")),(0,n.kt)("p",null,"The example above illustrates the Redis cluster plugin's output when there are two clusters being managed by one Redis operator. The top portion of the output is referred to as the Redis cluster's status and the bottom portion refers to the Redis cluster's state."),(0,n.kt)("h2",{id:"redis-cluster-status"},"Redis Cluster Status"),(0,n.kt)("p",null,"The Redis cluster status provides an easy way to see how your cluster is set up and which replicas belong to which primary. Without the cluster state, it's really difficult to determine in which ",(0,n.kt)("inlineCode",{parentName:"p"},"ZONE")," a Redis pod resides. The ",(0,n.kt)("inlineCode",{parentName:"p"},"POD NAME"),", ",(0,n.kt)("inlineCode",{parentName:"p"},"IP"),", and ",(0,n.kt)("inlineCode",{parentName:"p"},"ID")," fields help when parsing logs because these properties are used differently depending on what part of the code is logging messages. The ",(0,n.kt)("inlineCode",{parentName:"p"},"USED MEMORY")," and ",(0,n.kt)("inlineCode",{parentName:"p"},"MAX MEMORY")," fields help determine if you should be seeing evictions, while the ",(0,n.kt)("inlineCode",{parentName:"p"},"KEYS")," and ",(0,n.kt)("inlineCode",{parentName:"p"},"SLOTS")," fields can help determine if you have any hot spots in your cluster."),(0,n.kt)("h3",{id:"redis-cluster-status-fields"},"Redis Cluster Status Fields"),(0,n.kt)("table",null,(0,n.kt)("thead",{parentName:"table"},(0,n.kt)("tr",{parentName:"thead"},(0,n.kt)("th",{parentName:"tr",align:null},"Field"),(0,n.kt)("th",{parentName:"tr",align:null},"Description"))),(0,n.kt)("tbody",{parentName:"table"},(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"POD NAME"),(0,n.kt)("td",{parentName:"tr",align:null},"Name of Redis pod where the first character indicates its role. 
See the ",(0,n.kt)("a",{parentName:"td",href:"#redis-cluster-pod-role-prefix-legend"},"legend")," for more details")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"IP"),(0,n.kt)("td",{parentName:"tr",align:null},"Internal IP address of the Redis pod")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"NODE"),(0,n.kt)("td",{parentName:"tr",align:null},"IP address of the worker node on which the pod has been scheduled")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"ID"),(0,n.kt)("td",{parentName:"tr",align:null},"Redis ID of the pod")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"ZONE"),(0,n.kt)("td",{parentName:"tr",align:null},"Zone of the worker node on which the pod has been scheduled")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"USED MEMORY"),(0,n.kt)("td",{parentName:"tr",align:null},"Human-readable representation of the total number of bytes allocated by Redis using its allocator")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"MAX MEMORY"),(0,n.kt)("td",{parentName:"tr",align:null},"Human-readable representation of Redis' ",(0,n.kt)("inlineCode",{parentName:"td"},"maxmemory")," configuration directive")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"KEYS"),(0,n.kt)("td",{parentName:"tr",align:null},"List of the number of keys in each database using the form ",(0,n.kt)("inlineCode",{parentName:"td"},"db0:db0_key_count"),",",(0,n.kt)("inlineCode",{parentName:"td"},"db1:db1_key_count"))),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"SLOTS"),(0,n.kt)("td",{parentName:"tr",align:null},"List of the slot ranges owned by this primary")))),(0,n.kt)("h3",{id:"redis-cluster-pod-role-prefix-legend"},"Redis Cluster Pod Role Prefix Legend"),(0,n.kt)("table",null,(0,n.kt)("thead",{parentName:"table"},(0,n.kt)("tr",{parentName:"thead"},(0,n.kt)("th",{parentName:"tr",align:"center"},"Prefix"),(0,n.kt)("th",{parentName:"tr",align:null},"Description"))),(0,n.kt)("tbody",{parentName:"table"},(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:"center"},"+"),(0,n.kt)("td",{parentName:"tr",align:null},"Primary")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:"center"},"|"),(0,n.kt)("td",{parentName:"tr",align:null},"Replica to the primary above")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:"center"},"?"),(0,n.kt)("td",{parentName:"tr",align:null},"Pod that matches the label selector but is not a part of the cluster yet")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:"center"},"^"),(0,n.kt)("td",{parentName:"tr",align:null},"Pod that is currently joining the cluster but has yet to be assigned a role")))),(0,n.kt)("h2",{id:"redis-cluster-state"},"Redis Cluster State"),(0,n.kt)("p",null,"The Redis cluster state provides additional information that is useful when trying to determine the cluster's health. 
It also indicates if a scaling, rebalancing, or rolling update operation is occurring."),(0,n.kt)("h3",{id:"redis-cluster-state-fields"},"Redis Cluster State Fields"),(0,n.kt)("table",null,(0,n.kt)("thead",{parentName:"table"},(0,n.kt)("tr",{parentName:"thead"},(0,n.kt)("th",{parentName:"tr",align:null},"Field"),(0,n.kt)("th",{parentName:"tr",align:null},"Description"))),(0,n.kt)("tbody",{parentName:"table"},(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"NAME"),(0,n.kt)("td",{parentName:"tr",align:null},"Redis cluster name")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"NAMESPACE"),(0,n.kt)("td",{parentName:"tr",align:null},"Namespace of Redis cluster deployment")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"OPS STATUS"),(0,n.kt)("td",{parentName:"tr",align:null},"ClusterOK ","|"," Scaling ","|"," Rebalancing ","|"," RollingUpdate")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"REDIS STATUS"),(0,n.kt)("td",{parentName:"tr",align:null},"OK ","|"," KO ","|"," Scaling ","|"," Calculating Rebalancing ","|"," Rebalancing ","|"," RollingUpdate")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"PODS"),(0,n.kt)("td",{parentName:"tr",align:null},"Current number of ready pods / current number of total pods / desired number of pods")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"NB PRIMARY"),(0,n.kt)("td",{parentName:"tr",align:null},"Current number of primaries / desired number of primaries")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"REPLICATION"),(0,n.kt)("td",{parentName:"tr",align:null},"Current min RF - current max RF / desired RF")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"ZONE SKEW"),(0,n.kt)("td",{parentName:"tr",align:null},"Primary node zone skew / replica node zone skew / BALANCED ","|"," UNBALANCED")))))}p.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/fd0f2061.fd5dd73e.js b/assets/js/fd0f2061.4effe9e9.js similarity index 99% rename from assets/js/fd0f2061.fd5dd73e.js rename to assets/js/fd0f2061.4effe9e9.js index 6d54c7ac..7056f1ab 100644 --- a/assets/js/fd0f2061.fd5dd73e.js +++ b/assets/js/fd0f2061.4effe9e9.js @@ -1 +1 @@ -"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[168],{3905:(e,t,r)=>{r.d(t,{Zo:()=>d,kt:()=>f});var o=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function i(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var p=o.createContext({}),s=function(e){var t=o.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},d=function(e){var t=s(e.components);return o.createElement(p.Provider,{value:t},e.children)},u="mdxType",c={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},m=o.forwardRef((function(e,t){var 
r=e.components,n=e.mdxType,a=e.originalType,p=e.parentName,d=l(e,["components","mdxType","originalType","parentName"]),u=s(r),m=n,f=u["".concat(p,".").concat(m)]||u[m]||c[m]||a;return r?o.createElement(f,i(i({ref:t},d),{},{components:r})):o.createElement(f,i({ref:t},d))}));function f(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var a=r.length,i=new Array(a);i[0]=m;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[u]="string"==typeof e?e:n,i[1]=l;for(var s=2;s{r.r(t),r.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>c,frontMatter:()=>a,metadata:()=>l,toc:()=>s});var o=r(7462),n=(r(7294),r(3905));const a={title:"Rolling Update Procedure",slug:"/rolling-update"},i="Rolling Update Procedure",l={unversionedId:"rolling-update",id:"rolling-update",title:"Rolling Update Procedure",description:"Overview",source:"@site/docs/rolling-update.md",sourceDirName:".",slug:"/rolling-update",permalink:"/operator-for-redis-cluster/rolling-update",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/rolling-update.md",tags:[],version:"current",lastUpdatedAt:1691078446,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Rolling Update Procedure",slug:"/rolling-update"},sidebar:"docs",previous:{title:"Scaling Operations",permalink:"/operator-for-redis-cluster/scaling"},next:{title:"Key Migration",permalink:"/operator-for-redis-cluster/key-migration"}},p={},s=[{value:"Overview",id:"overview",level:2},{value:"Redis cluster upgrades",id:"redis-cluster-upgrades",level:2},{value:"Resource limitations",id:"resource-limitations",level:2}],d={toc:s},u="wrapper";function c(e){let{components:t,...r}=e;return(0,n.kt)(u,(0,o.Z)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("h1",{id:"rolling-update-procedure"},"Rolling Update Procedure"),(0,n.kt)("h2",{id:"overview"},"Overview"),(0,n.kt)("p",null,"In production, developers aim for zero downtime when periodically deploying newer versions of their application. Per Kubernetes documentation:"),(0,n.kt)("blockquote",null,(0,n.kt)("p",{parentName:"blockquote"},"rolling updates allow Deployments' update to take place with zero downtime by incrementally updating Pods instances with new ones")),(0,n.kt)("p",null,"To learn more about how rolling updates work in k8s, see ",(0,n.kt)("a",{parentName:"p",href:"https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/"},"Performing a Rolling Update"),"."),(0,n.kt)("h2",{id:"redis-cluster-upgrades"},"Redis cluster upgrades"),(0,n.kt)("p",null,"A rolling update occurs when the user applies a change to the Redis cluster pod template spec. For example, a user might update the Redis cluster pod image tag in ",(0,n.kt)("inlineCode",{parentName:"p"},"charts/node-for-redis/values.yaml")," and run ",(0,n.kt)("inlineCode",{parentName:"p"},"helm upgrade"),". 
When the Redis operator detects the pod template spec change, the following procedure takes place:"),(0,n.kt)("ol",null,(0,n.kt)("li",{parentName:"ol"},"Compare the number of running Redis pods with the number of pods required for the rolling update:",(0,n.kt)("pre",{parentName:"li"},(0,n.kt)("code",{parentName:"pre"},"# migration pods = 1 + replication factor\n# required pods = # primaries x # migration pods\n# pods to create = # required pods + # migration pods - # of running pods\n")),"where ",(0,n.kt)("inlineCode",{parentName:"li"},"# migration pods")," is the number of pods needed to migrate one primary and all of its replicas, ",(0,n.kt)("inlineCode",{parentName:"li"},"# required pods")," is the total number of pods required for the cluster, and ",(0,n.kt)("inlineCode",{parentName:"li"},"# pods to create")," is the number of pods to create on a single rolling update iteration."),(0,n.kt)("li",{parentName:"ol"},"If ",(0,n.kt)("inlineCode",{parentName:"li"},"# pods to create > 0"),", create additional pods with the new pod template spec."),(0,n.kt)("li",{parentName:"ol"},"Separate old nodes and new nodes according to their pod spec hash annotation."),(0,n.kt)("li",{parentName:"ol"},"Select the old primary node to replace with one of the newly created pods."),(0,n.kt)("li",{parentName:"ol"},"Generate the primary to replicas mapping for the newly created pods."),(0,n.kt)("li",{parentName:"ol"},"Attach the new replicas to the new primary. "),(0,n.kt)("li",{parentName:"ol"},"Migrate slots (and by default, keys) from the old primary to the new primary. "),(0,n.kt)("li",{parentName:"ol"},"Detach, forget, and delete the old pods.")),(0,n.kt)("p",null,"The Redis cluster rolling update procedure ensures that there is no downtime as new nodes replace old ones. However, because the migration of keys from old primaries to new ones is a time intensive operation, you may see a temporary decrease in the performance of your cluster during this process. To learn more about step 7, see ",(0,n.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/key-migration"},"key migration"),"."),(0,n.kt)("h2",{id:"resource-limitations"},"Resource limitations"),(0,n.kt)("p",null,"This procedure requires additional resources beyond what is normally allocated to the Redis cluster. More specifically, this procedure creates an extra ",(0,n.kt)("inlineCode",{parentName:"p"},"1 + replication factor")," pods on each rolling update iteration, so you will need ensure that you have allocated sufficient resources. For standard configurations that allow multiple pods per node, you may need to increase memory + cpu on your existing nodes. If you have configured your cluster topology to limit one Redis pod per k8s node, you may need to increase the number of k8s nodes in your worker pool."),(0,n.kt)("p",null,"In the case where there are insufficient resources to schedule new Redis pods, the pods will get stuck in ",(0,n.kt)("inlineCode",{parentName:"p"},"Pending")," state. This state is difficult to recover from because the Redis operator will continue to apply the rolling update procedure until it completes. 
If you find your newly created pods are in ",(0,n.kt)("inlineCode",{parentName:"p"},"Pending")," state, increase the allocated memory + cpus."))}c.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkoperator_for_redis_cluster=self.webpackChunkoperator_for_redis_cluster||[]).push([[168],{3905:(e,t,r)=>{r.d(t,{Zo:()=>d,kt:()=>f});var o=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);t&&(o=o.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,o)}return r}function i(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var p=o.createContext({}),s=function(e){var t=o.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},d=function(e){var t=s(e.components);return o.createElement(p.Provider,{value:t},e.children)},u="mdxType",c={inlineCode:"code",wrapper:function(e){var t=e.children;return o.createElement(o.Fragment,{},t)}},m=o.forwardRef((function(e,t){var r=e.components,n=e.mdxType,a=e.originalType,p=e.parentName,d=l(e,["components","mdxType","originalType","parentName"]),u=s(r),m=n,f=u["".concat(p,".").concat(m)]||u[m]||c[m]||a;return r?o.createElement(f,i(i({ref:t},d),{},{components:r})):o.createElement(f,i({ref:t},d))}));function f(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var a=r.length,i=new Array(a);i[0]=m;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[u]="string"==typeof e?e:n,i[1]=l;for(var s=2;s{r.r(t),r.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>c,frontMatter:()=>a,metadata:()=>l,toc:()=>s});var o=r(7462),n=(r(7294),r(3905));const a={title:"Rolling Update Procedure",slug:"/rolling-update"},i="Rolling Update Procedure",l={unversionedId:"rolling-update",id:"rolling-update",title:"Rolling Update Procedure",description:"Overview",source:"@site/docs/rolling-update.md",sourceDirName:".",slug:"/rolling-update",permalink:"/operator-for-redis-cluster/rolling-update",draft:!1,editUrl:"https://cin.github.io/operator-for-redis-cluster/docs/rolling-update.md",tags:[],version:"current",lastUpdatedAt:1691083274,formattedLastUpdatedAt:"Aug 3, 2023",frontMatter:{title:"Rolling Update Procedure",slug:"/rolling-update"},sidebar:"docs",previous:{title:"Scaling Operations",permalink:"/operator-for-redis-cluster/scaling"},next:{title:"Key Migration",permalink:"/operator-for-redis-cluster/key-migration"}},p={},s=[{value:"Overview",id:"overview",level:2},{value:"Redis cluster upgrades",id:"redis-cluster-upgrades",level:2},{value:"Resource limitations",id:"resource-limitations",level:2}],d={toc:s},u="wrapper";function c(e){let{components:t,...r}=e;return(0,n.kt)(u,(0,o.Z)({},d,r,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("h1",{id:"rolling-update-procedure"},"Rolling Update Procedure"),(0,n.kt)("h2",{id:"overview"},"Overview"),(0,n.kt)("p",null,"In production, developers aim for zero downtime when periodically deploying newer versions of their application. 
Per Kubernetes documentation:"),(0,n.kt)("blockquote",null,(0,n.kt)("p",{parentName:"blockquote"},"rolling updates allow Deployments' update to take place with zero downtime by incrementally updating Pods instances with new ones")),(0,n.kt)("p",null,"To learn more about how rolling updates work in k8s, see ",(0,n.kt)("a",{parentName:"p",href:"https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/"},"Performing a Rolling Update"),"."),(0,n.kt)("h2",{id:"redis-cluster-upgrades"},"Redis cluster upgrades"),(0,n.kt)("p",null,"A rolling update occurs when the user applies a change to the Redis cluster pod template spec. For example, a user might update the Redis cluster pod image tag in ",(0,n.kt)("inlineCode",{parentName:"p"},"charts/node-for-redis/values.yaml")," and run ",(0,n.kt)("inlineCode",{parentName:"p"},"helm upgrade"),". When the Redis operator detects the pod template spec change, the following procedure takes place:"),(0,n.kt)("ol",null,(0,n.kt)("li",{parentName:"ol"},"Compare the number of running Redis pods with the number of pods required for the rolling update:",(0,n.kt)("pre",{parentName:"li"},(0,n.kt)("code",{parentName:"pre"},"# migration pods = 1 + replication factor\n# required pods = # primaries x # migration pods\n# pods to create = # required pods + # migration pods - # of running pods\n")),"where ",(0,n.kt)("inlineCode",{parentName:"li"},"# migration pods")," is the number of pods needed to migrate one primary and all of its replicas, ",(0,n.kt)("inlineCode",{parentName:"li"},"# required pods")," is the total number of pods required for the cluster, and ",(0,n.kt)("inlineCode",{parentName:"li"},"# pods to create")," is the number of pods to create on a single rolling update iteration."),(0,n.kt)("li",{parentName:"ol"},"If ",(0,n.kt)("inlineCode",{parentName:"li"},"# pods to create > 0"),", create additional pods with the new pod template spec."),(0,n.kt)("li",{parentName:"ol"},"Separate old nodes and new nodes according to their pod spec hash annotation."),(0,n.kt)("li",{parentName:"ol"},"Select the old primary node to replace with one of the newly created pods."),(0,n.kt)("li",{parentName:"ol"},"Generate the primary to replicas mapping for the newly created pods."),(0,n.kt)("li",{parentName:"ol"},"Attach the new replicas to the new primary. "),(0,n.kt)("li",{parentName:"ol"},"Migrate slots (and by default, keys) from the old primary to the new primary. "),(0,n.kt)("li",{parentName:"ol"},"Detach, forget, and delete the old pods.")),(0,n.kt)("p",null,"The Redis cluster rolling update procedure ensures that there is no downtime as new nodes replace old ones. However, because the migration of keys from old primaries to new ones is a time intensive operation, you may see a temporary decrease in the performance of your cluster during this process. To learn more about step 7, see ",(0,n.kt)("a",{parentName:"p",href:"/operator-for-redis-cluster/key-migration"},"key migration"),"."),(0,n.kt)("h2",{id:"resource-limitations"},"Resource limitations"),(0,n.kt)("p",null,"This procedure requires additional resources beyond what is normally allocated to the Redis cluster. More specifically, this procedure creates an extra ",(0,n.kt)("inlineCode",{parentName:"p"},"1 + replication factor")," pods on each rolling update iteration, so you will need ensure that you have allocated sufficient resources. For standard configurations that allow multiple pods per node, you may need to increase memory + cpu on your existing nodes. 
If you have configured your cluster topology to limit one Redis pod per k8s node, you may need to increase the number of k8s nodes in your worker pool."),(0,n.kt)("p",null,"In the case where there are insufficient resources to schedule new Redis pods, the pods will get stuck in ",(0,n.kt)("inlineCode",{parentName:"p"},"Pending")," state. This state is difficult to recover from because the Redis operator will continue to apply the rolling update procedure until it completes. If you find your newly created pods are in ",(0,n.kt)("inlineCode",{parentName:"p"},"Pending")," state, increase the allocated memory + cpus."))}c.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.1ace37a5.js b/assets/js/runtime~main.1ace37a5.js deleted file mode 100644 index b9b8979d..00000000 --- a/assets/js/runtime~main.1ace37a5.js +++ /dev/null @@ -1 +0,0 @@ -(()=>{"use strict";var e,r,t,o,a,n={},f={};function i(e){var r=f[e];if(void 0!==r)return r.exports;var t=f[e]={exports:{}};return n[e].call(t.exports,t,t.exports,i),t.exports}i.m=n,e=[],i.O=(r,t,o,a)=>{if(!t){var n=1/0;for(l=0;l=a)&&Object.keys(i.O).every((e=>i.O[e](t[d])))?t.splice(d--,1):(f=!1,a0&&e[l-1][2]>a;l--)e[l]=e[l-1];e[l]=[t,o,a]},i.n=e=>{var r=e&&e.__esModule?()=>e.default:()=>e;return i.d(r,{a:r}),r},t=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,i.t=function(e,o){if(1&o&&(e=this(e)),8&o)return e;if("object"==typeof e&&e){if(4&o&&e.__esModule)return e;if(16&o&&"function"==typeof e.then)return e}var a=Object.create(null);i.r(a);var n={};r=r||[null,t({}),t([]),t(t)];for(var f=2&o&&e;"object"==typeof f&&!~r.indexOf(f);f=t(f))Object.getOwnPropertyNames(f).forEach((r=>n[r]=()=>e[r]));return n.default=()=>e,i.d(a,n),a},i.d=(e,r)=>{for(var t in r)i.o(r,t)&&!i.o(e,t)&&Object.defineProperty(e,t,{enumerable:!0,get:r[t]})},i.f={},i.e=e=>Promise.all(Object.keys(i.f).reduce(((r,t)=>(i.f[t](e,r),r)),[])),i.u=e=>"assets/js/"+({4:"9ed00105",53:"935f2afb",125:"3d9c95a4",168:"fd0f2061",350:"81482700",394:"d12053bc",505:"8d193b98",514:"1be78505",575:"b3f35ddf",918:"17896441",919:"fd0c4088",978:"b9207088"}[e]||e)+"."+{4:"77f97e25",53:"40149662",125:"3ed583a5",168:"fd5dd73e",350:"b74ab000",394:"2505c139",505:"04a07b21",514:"fc60d9ea",575:"076ba09e",918:"50ab02bb",919:"ff5067dd",972:"2d12ce03",978:"0df17469"}[e]+".js",i.miniCssF=e=>{},i.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),i.o=(e,r)=>Object.prototype.hasOwnProperty.call(e,r),o={},a="operator-for-redis-cluster:",i.l=(e,r,t,n)=>{if(o[e])o[e].push(r);else{var f,d;if(void 0!==t)for(var u=document.getElementsByTagName("script"),l=0;l{f.onerror=f.onload=null,clearTimeout(b);var a=o[e];if(delete o[e],f.parentNode&&f.parentNode.removeChild(f),a&&a.forEach((e=>e(t))),r)return r(t)},b=setTimeout(c.bind(null,void 0,{type:"timeout",target:f}),12e4);f.onerror=c.bind(null,f.onerror),f.onload=c.bind(null,f.onload),d&&document.head.appendChild(f)}},i.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.p="/operator-for-redis-cluster/",i.gca=function(e){return e={17896441:"918",81482700:"350","9ed00105":"4","935f2afb":"53","3d9c95a4":"125",fd0f2061:"168",d12053bc:"394","8d193b98":"505","1be78505":"514",b3f35ddf:"575",fd0c4088:"919",b9207088:"978"}[e]||e,i.p+i.u(e)},(()=>{var e={303:0,532:0};i.f.j=(r,t)=>{var o=i.o(e,r)?e[r]:void 
0;if(0!==o)if(o)t.push(o[2]);else if(/^(303|532)$/.test(r))e[r]=0;else{var a=new Promise(((t,a)=>o=e[r]=[t,a]));t.push(o[2]=a);var n=i.p+i.u(r),f=new Error;i.l(n,(t=>{if(i.o(e,r)&&(0!==(o=e[r])&&(e[r]=void 0),o)){var a=t&&("load"===t.type?"missing":t.type),n=t&&t.target&&t.target.src;f.message="Loading chunk "+r+" failed.\n("+a+": "+n+")",f.name="ChunkLoadError",f.type=a,f.request=n,o[1](f)}}),"chunk-"+r,r)}},i.O.j=r=>0===e[r];var r=(r,t)=>{var o,a,n=t[0],f=t[1],d=t[2],u=0;if(n.some((r=>0!==e[r]))){for(o in f)i.o(f,o)&&(i.m[o]=f[o]);if(d)var l=d(i)}for(r&&r(t);u{"use strict";var e,r,t,o,n,a={},f={};function i(e){var r=f[e];if(void 0!==r)return r.exports;var t=f[e]={exports:{}};return a[e].call(t.exports,t,t.exports,i),t.exports}i.m=a,e=[],i.O=(r,t,o,n)=>{if(!t){var a=1/0;for(l=0;l=n)&&Object.keys(i.O).every((e=>i.O[e](t[d])))?t.splice(d--,1):(f=!1,n0&&e[l-1][2]>n;l--)e[l]=e[l-1];e[l]=[t,o,n]},i.n=e=>{var r=e&&e.__esModule?()=>e.default:()=>e;return i.d(r,{a:r}),r},t=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,i.t=function(e,o){if(1&o&&(e=this(e)),8&o)return e;if("object"==typeof e&&e){if(4&o&&e.__esModule)return e;if(16&o&&"function"==typeof e.then)return e}var n=Object.create(null);i.r(n);var a={};r=r||[null,t({}),t([]),t(t)];for(var f=2&o&&e;"object"==typeof f&&!~r.indexOf(f);f=t(f))Object.getOwnPropertyNames(f).forEach((r=>a[r]=()=>e[r]));return a.default=()=>e,i.d(n,a),n},i.d=(e,r)=>{for(var t in r)i.o(r,t)&&!i.o(e,t)&&Object.defineProperty(e,t,{enumerable:!0,get:r[t]})},i.f={},i.e=e=>Promise.all(Object.keys(i.f).reduce(((r,t)=>(i.f[t](e,r),r)),[])),i.u=e=>"assets/js/"+({4:"9ed00105",53:"935f2afb",125:"3d9c95a4",168:"fd0f2061",350:"81482700",394:"d12053bc",505:"8d193b98",514:"1be78505",575:"b3f35ddf",918:"17896441",919:"fd0c4088",978:"b9207088"}[e]||e)+"."+{4:"8df2f6a6",53:"40149662",125:"aeee8a2e",168:"4effe9e9",350:"7bacc91a",394:"d294519a",505:"20c777c8",514:"fc60d9ea",575:"076ba09e",918:"50ab02bb",919:"b901191e",972:"2d12ce03",978:"f80f3130"}[e]+".js",i.miniCssF=e=>{},i.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),i.o=(e,r)=>Object.prototype.hasOwnProperty.call(e,r),o={},n="operator-for-redis-cluster:",i.l=(e,r,t,a)=>{if(o[e])o[e].push(r);else{var f,d;if(void 0!==t)for(var u=document.getElementsByTagName("script"),l=0;l{f.onerror=f.onload=null,clearTimeout(b);var n=o[e];if(delete o[e],f.parentNode&&f.parentNode.removeChild(f),n&&n.forEach((e=>e(t))),r)return r(t)},b=setTimeout(s.bind(null,void 0,{type:"timeout",target:f}),12e4);f.onerror=s.bind(null,f.onerror),f.onload=s.bind(null,f.onload),d&&document.head.appendChild(f)}},i.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.p="/operator-for-redis-cluster/",i.gca=function(e){return e={17896441:"918",81482700:"350","9ed00105":"4","935f2afb":"53","3d9c95a4":"125",fd0f2061:"168",d12053bc:"394","8d193b98":"505","1be78505":"514",b3f35ddf:"575",fd0c4088:"919",b9207088:"978"}[e]||e,i.p+i.u(e)},(()=>{var e={303:0,532:0};i.f.j=(r,t)=>{var o=i.o(e,r)?e[r]:void 0;if(0!==o)if(o)t.push(o[2]);else if(/^(303|532)$/.test(r))e[r]=0;else{var n=new Promise(((t,n)=>o=e[r]=[t,n]));t.push(o[2]=n);var a=i.p+i.u(r),f=new Error;i.l(a,(t=>{if(i.o(e,r)&&(0!==(o=e[r])&&(e[r]=void 0),o)){var n=t&&("load"===t.type?"missing":t.type),a=t&&t.target&&t.target.src;f.message="Loading chunk "+r+" failed.\n("+n+": 
"+a+")",f.name="ChunkLoadError",f.type=n,f.request=a,o[1](f)}}),"chunk-"+r,r)}},i.O.j=r=>0===e[r];var r=(r,t)=>{var o,n,a=t[0],f=t[1],d=t[2],u=0;if(a.some((r=>0!==e[r]))){for(o in f)i.o(f,o)&&(i.m[o]=f[o]);if(d)var l=d(i)}for(r&&r(t);u Redis Server Configuration | Operator for Redis Cluster - +

Redis Server Configuration

Overview

A Redis server can be configured by providing a Redis configuration file called redis.conf. To read more about the format of this file, see the configuration documentation.

Redis Cluster Configuration

The Redis operator manages clusters that operate in cluster mode. This means every node in the cluster specifies the cluster-enabled yes configuration option, and every node in the cluster has the same configuration. You do not need to set cluster-enabled explicitly in your configuration: if it is not present, we add it automatically when a Redis pod starts.
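
For illustration, a minimal sketch of the cluster-mode section of a redis.conf. Only cluster-enabled is required by the operator; cluster-config-file and cluster-node-timeout are shown with typical stock values as commonly tuned companions, not values the operator sets:

cluster-enabled yes             # run this node in cluster mode
cluster-config-file nodes.conf  # file where the node persists its view of the cluster
cluster-node-timeout 15000      # milliseconds before a node is considered failing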

Redis clusters that operate in cluster mode support data sharding, which is essential to ensuring high availability. See the Redis cluster specification to learn more.

Configuration Options

There are various configuration options you will want to consider whether you are using Redis as a cache or as a persistent database. We urge you to read the Redis configuration documentation to understand the tradeoffs for each option.

Defaults

A Redis server is able to start without specifying a redis.conf configuration file or providing override configuration; it will instead use the default settings. We do not recommend using the defaults in a production environment, as your Redis database can quickly exceed the amount of memory allocated to your Redis pods. Our operator currently deploys and manages Redis clusters using Redis 6.2. Read the default configuration for Redis 6.2 to learn more about the specific settings.

Persistence

If you use Redis as a database, you will need to enable persistence. Redis provides multiple persistence options, such as Redis Database (RDB) and Append Only File (AOF). You can read more about the advantages and disadvantages of each persistence option in the persistence documentation.
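
As a rough sketch, enabling each option in redis.conf looks like the following (the values shown are illustrative assumptions, not operator defaults):

save 3600 1 300 10 60 10000   # RDB: snapshot when the paired seconds/changes thresholds are met
appendonly yes                # AOF: append every write operation to the log
appendfsync everysec          # AOF fsync policy: always, everysec, or no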

Snapshotting

To quote the Redis documentation:

By default Redis saves snapshots of the dataset on disk, in a binary file called dump.rdb. You can configure Redis to have it save the dataset every N seconds if there are at least M changes in the dataset, or you can manually call the SAVE or BGSAVE commands.

Using the default settings, snapshotting will occur:

  • after 3600 sec (60 min) if at least 1 key changed
  • after 300 sec (5 min) if at least 10 keys changed
  • after 60 sec if at least 10000 keys changed
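
Expressed as a single save directive in redis.conf, these defaults are equivalent to:

save 3600 1 300 10 60 10000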

Snapshots can be extremely useful for backups and faster restarts. We recommend configuring save to a reasonable value depending on the number of requests per second your database processes.

If you use Redis as a cache, be sure to disable snapshotting by setting save "" in redis.conf. For a large Redis cluster processing thousands of requests per second, disk can fill up fairly quickly with database snapshots. Disabling snapshotting prevents Redis from dumping the entire dataset to disk altogether.

Max Memory

To quote the Redis documentation:

The maxmemory configuration directive is used in order to configure Redis to use a specified amount of memory for the data set.

We highly encourage setting maxmemory to a value lower than the memory allocated to each Redis pod. By default, we set maxmemory to 70% of the allocated Redis pod memory. You should adjust this value depending on how much additional memory the Redis process consumes for other operations.
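
For example, with a 10Gi memory limit on each Redis pod, the default works out to roughly 10Gi × 0.70 ≈ 7Gi of maxmemory; override maxmemory explicitly (as shown in the next section) if that split does not suit your workload.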

Eviction Policies

See Using Redis as an LRU cache to learn more about which maxmemory-policy is best for your needs. If you use Redis as a database, you will likely want to keep the default maxmemory-policy set to noeviction.

Overriding redis.conf

You have two separate options for overriding the default redis.conf when deploying a Redis cluster. The first is to specify your configuration as key-value pairs in redis.configuration.valueMap:

redis:
  configuration:
    valueMap:
      maxmemory-policy: "volatile-lfu"
      maxmemory: "10Gb"
      ...

Note: be sure to always quote values if you decide to use this approach.

The second option is to specify redis.configuration.file with the path to your redis.conf file. For example:

redis:
  configuration:
    file: "/path/to/redis.conf"

Your redis.conf file should have the same format as any Redis configuration file:

maxmemory-policy volatile-lfu
maxmemory 10gb
...

Note: do not quote values in Redis configuration files.

Configuration Examples

Redis as a Database

redis:
  configuration:
    valueMap:
      maxmemory-policy: "noeviction"    # Do not evict keys even if memory is full
      maxmemory: "100gb"
      save: "3600 1 300 10 60 10000"    # Enable RDB persistence to perform snapshots (see Snapshotting section)
      appendonly: "yes"                 # Enable AOF persistence

Redis as a Cache

redis:
  configuration:
    valueMap:
      maxmemory-policy: "volatile-lfu"        # Expire keys based on least frequently used policy
      maxmemory: "10gb"
      save: ""                                # Disable saving snapshots of the database to disk
      lazyfree-lazy-eviction: "yes"           # Asynchronously evict keys
      lazyfree-lazy-expire: "yes"             # Asynchronously delete expired keys
      lazyfree-lazy-server-del: "yes"         # Asynchronously delete keys during specific server commands
      replica-lazy-flush: "yes"               # Asynchronously flush keys after replica resynchronization
      cluster-require-full-coverage: "no"     # Accept queries even when only part of key space is covered
      cluster-allow-reads-when-down: "yes"    # Allow nodes to serve reads while cluster is down
+ \ No newline at end of file diff --git a/contributing.html b/contributing.html index 2564a21f..a1a00b47 100644 --- a/contributing.html +++ b/contributing.html @@ -4,14 +4,14 @@ Contributing | Operator for Redis Cluster - +

Contributing

Set up your machine

Refer to our cookbook to learn how to set up your machine.

Development process

This section assumes you have already set up your environment to build and install the Redis operator and cluster.

Create a branch

The first step to contributing is creating a branch off of the main branch in your forked project. Branch names should be well formatted. Start your branch name with a type. Choose one of the following: feat, fix, bug, docs, style, refactor, perf, test, add, remove, move, bump, update, release

Example:

$ git checkout -b feat/node-scaling

Commit your code

Make your desired changes to the branch and then commit your work:

$ git add .
$ git commit -m "<USEFUL_MESSAGE>"
$ git push --set-upstream origin <BRANCH_NAME>

When you are ready to make a pull request, we suggest you run:

$ make generate
//path/to/go/bin/controller-gen object paths="./..."
$ make fmt
find . -name '*.go' -not -wholename './vendor/*' | while read -r file; do gofmt -w -s "$file"; goimports -w "$file"; done
$ make lint
golangci-lint run --enable exportloopref
$ make test
./go.test.sh
...

These steps will:

  1. Regenerate the RedisCluster CRD
  2. Format the code according to gofmt standards
  3. Run the linter
  4. Run the unit tests

End-to-end tests

To run the end-to-end tests, you need to have a running Kubernetes cluster. Follow the steps in the cookbook.
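
Once your cluster and the Redis operator are deployed, the e2e suite can be run with the same command used in the cookbook:

$ go test -timeout 30m ./test/e2e --kubeconfig=$HOME/.kube/config --ginkgo.v --test.v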

Submit a pull request

Push your branch to your redis-operator fork and open a pull request against the main branch in the official project. When you open a PR, be sure to include a description explaining your changes, as well as

Resolves #<ISSUE_NUMBER>

We also ask that you add labels describing the t-shirt size of the task (S, M, L, XL) and the task type (enhancement, documentation, bug, etc.).

+ \ No newline at end of file diff --git a/cookbook.html b/cookbook.html index ad5d8422..803f0627 100644 --- a/cookbook.html +++ b/cookbook.html @@ -4,13 +4,13 @@ Cookbook | Operator for Redis Cluster - +

Cookbook

Installation

Operator for Redis Cluster is written in Go.

Required Dependencies

Download and build the source code

Start by making a fork of the operator-for-redis-cluster repository. Then, clone your forked repo:

$ git clone git@github.com:<YOUR_USERNAME>/operator-for-redis-cluster.git
Cloning into 'operator-for-redis-cluster'...
$ cd operator-for-redis-cluster

Build the project:

$ make build
CGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:33 -s" -o bin/operator ./cmd/operator
CGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:35 -s" -o bin/node ./cmd/node
CGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:37 -s" -o bin/metrics ./cmd/metrics

Run the test suite to make sure everything works:

$ make test
./go.test.sh
ok github.com/IBM/operator-for-redis-cluster/pkg/controller 5.162s coverage: 33.1% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/controller/clustering 0.711s coverage: 75.6% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/controller/pod 1.726s coverage: 40.0% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/controller/sanitycheck 0.631s coverage: 21.5% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/garbagecollector 1.740s coverage: 75.0% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/redis 0.728s coverage: 22.4% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/redis/fake 0.148s coverage: 85.8% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/redisnode 1.924s coverage: 43.4% of statements

Install the kubectl Redis cluster plugin (see the kubectl plugin page for more details):

$ make plugin

Create a Kubernetes cluster

To run the Redis operator, you need to have a running Kubernetes cluster. You can use local k8s cluster frameworks such as kind or minikube. Use the following guide to install a kind cluster similar to what we use in our e2e tests.

From the project root directory, create your kind cluster using the e2e test configuration:

$ kind create cluster --config ./test/e2e/kind_config.yml

Build the required docker images:

make container PREFIX= TAG=latest

Once the kind cluster is up and running, load the images into the kind cluster:

$ kind load docker-image operator-for-redis:latest
$ kind load docker-image node-for-redis:latest
$ kind load docker-image metrics-for-redis:latest

Deploy a Redis operator

Install the operator-for-redis Helm chart:

$ helm install op charts/operator-for-redis --wait --set image.repository=operator-for-redis --set image.tag=latest
NAME: op
LAST DEPLOYED: Thu Oct 21 15:11:51 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

Confirm that the operator is running properly:

$ kubectl get pods
NAME READY STATUS RESTARTS AGE
op-operator-for-redis-64dbfb4b59-xjttw 1/1 Running 0 31s

Deploy a Redis cluster

Install the node-for-redis Helm chart:

$ helm install --wait cluster charts/node-for-redis --set image.repository=node-for-redis --set image.tag=latest
NAME: cluster
LAST DEPLOYED: Thu Oct 21 15:12:05 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

Check the cluster status:

$ kubectl rc
POD NAME IP NODE ID ZONE USED MEMORY MAX MEMORY KEYS SLOTS
+ rediscluster-cluster-node-for-redis-2h92v 10.244.1.89 172.18.0.3 5606ea9ab09678124a4b17de10ab92a78aac0b4d dal13 35.55M 10.95G 5462-10923
| rediscluster-cluster-node-for-redis-nf24b 10.244.2.89 172.18.0.2 6840c0e5db16ebf073f57c67a6487c1c7f0d12d1 dal10 2.62M 10.95G
+ rediscluster-cluster-node-for-redis-4h4s2 10.244.2.88 172.18.0.2 8a2f2db39b85cf059e88dc80d7c9cafefac94de0 dal10 34.63M 10.95G 10924-16383
| rediscluster-cluster-node-for-redis-77bt2 10.244.3.87 172.18.0.4 74582d0e0cedb458e81f1e9d4f32cdc3f5e9399b dal12 2.60M 10.95G
+ rediscluster-cluster-node-for-redis-dh6pt 10.244.3.86 172.18.0.4 81f5c13bec9a0545a62de08b2a309a87d29855c7 dal12 2.83M 10.95G 0-5461
| rediscluster-cluster-node-for-redis-jnh2h 10.244.1.88 172.18.0.3 ffae381633377414597731597518529255fd9b69 dal13 2.64M 10.95G

NAME NAMESPACE PODS OPS STATUS REDIS STATUS NB PRIMARY REPLICATION ZONE SKEW
cluster-node-for-redis default 6/6/6 ClusterOK OK 3/3 1-1/1 0/0/BALANCED
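
As an optional extra check, you can query cluster health from inside any Redis pod (using one of the pod names from the output above, and assuming redis-cli is available in the node image); it should report cluster_state:ok:

$ kubectl exec rediscluster-cluster-node-for-redis-dh6pt -- redis-cli cluster info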

Clean up your environment

Delete the Redis cluster:

$ helm uninstall cluster
release "cluster" deleted

Delete the Redis operator:

$ helm uninstall op
release "op" deleted

Ensure all pods have been deleted:

$ kubectl get pods
No resources found in default namespace.

Run end-to-end tests

If you followed the steps for creating a kind cluster with the e2e test configuration, then running the e2e tests is simple.

Build the required docker images:

make container PREFIX=ibmcom/ TAG=local
make container-node PREFIX=ibmcom/ TAG=new

Note that we need both local and new image tags for a rolling update e2e test case.

Load the required images into the kind cluster:

$ kind load docker-image ibmcom/operator-for-redis:local
$ kind load docker-image ibmcom/node-for-redis:local
$ kind load docker-image ibmcom/node-for-redis:new

Once the kind cluster is up and running, deploy the operator-for-redis Helm chart:

$ helm install op charts/operator-for-redis --wait --set image.repository=ibmcom/operator-for-redis --set image.tag=local
NAME: op
LAST DEPLOYED: Thu Oct 21 15:11:51 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

When the operator-for-redis pod is up and running, you can start the e2e regression:

$ go test -timeout 30m ./test/e2e --kubeconfig=$HOME/.kube/config --ginkgo.v --test.v
Running Suite: RedisCluster Suite
=================================
Random Seed: 1634845111
Will run 11 of 11 specs

Oct 21 15:38:31.261: INFO: KubeconfigPath-> "/Users/kscharm/.kube/config"
Oct 21 15:38:31.295: INFO: Check whether RedisCluster resource is registered...
RedisCluster CRUD operations
should create a RedisCluster

...

Ran 11 of 11 Specs in 517.299 seconds
SUCCESS! -- 11 Passed | 0 Failed | 0 Pending | 0 Skipped
PASS
ok github.com/IBM/operator-for-redis-cluster/test/e2e 517.776s
- +

Cookbook

Installation

Operator for Redis Cluster is written in Go.

Required Dependencies

Download and build the source code

Start by making a fork of the operator-for-redis-cluster repository. Then, clone your forked repo:

$ git clone git@github.com:<YOUR_USERNAME>/operator-for-redis-cluster.git
Cloning into 'operator-for-redis-cluster'...
$ cd operator-for-redis-cluster

Build the project:

$ make build
CGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:33 -s" -o bin/operator ./cmd/operator
CGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:35 -s" -o bin/node ./cmd/node
CGO_ENABLED=0 go build -installsuffix cgo -ldflags "-w -X github.com/IBM/operator-for-redis-cluster/pkg/utils.TAG=0.3.4 -X github.com/IBM/operator-for-redis-cluster/pkg/utils.COMMIT=81c58d3bb6e713679637d9971fc8f795ca5a3e2f -X github.com/IBM/operator-for-redis-cluster/pkg/utils.OPERATOR_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.REDIS_VERSION= -X github.com/IBM/operator-for-redis-cluster/pkg/utils.BUILDTIME=2021-10-21/12:44:37 -s" -o bin/metrics ./cmd/metrics

Run the test suite to make sure everything works:

$ make test
./go.test.sh
ok github.com/IBM/operator-for-redis-cluster/pkg/controller 5.162s coverage: 33.1% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/controller/clustering 0.711s coverage: 75.6% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/controller/pod 1.726s coverage: 40.0% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/controller/sanitycheck 0.631s coverage: 21.5% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/garbagecollector 1.740s coverage: 75.0% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/redis 0.728s coverage: 22.4% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/redis/fake 0.148s coverage: 85.8% of statements
ok github.com/IBM/operator-for-redis-cluster/pkg/redisnode 1.924s coverage: 43.4% of statements

Install the kubectl Redis cluster plugin (more info here)

$ make plugin

Create a Kubernetes cluster

To run the Redis operator, you need to have a running Kubernetes cluster. You can use local k8s cluster frameworks such as kind or minikube. Use the following guide to install a kind cluster similar to what we use in our e2e tests.

From the project root directory, create your kind cluster using the e2e test configuration:

$ kind create cluster --config ./test/e2e/kind_config.yml
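For reference, a multi-node kind configuration generally has the following shape. This is only an illustrative sketch; the authoritative configuration is the test/e2e/kind_config.yml file referenced above.

kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
  - role: worker
  - role: worker
  - role: worker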

Build the required docker images:

make container PREFIX= TAG=latest

Once the kind cluster is up and running, load the images into the kind cluster:

$ kind load docker-image operator-for-redis:latest
$ kind load docker-image node-for-redis:latest
$ kind load docker-image metrics-for-redis:latest

Deploy a Redis operator

Install the operator-for-redis Helm chart:

$ helm install op charts/operator-for-redis --wait --set image.repository=operator-for-redis --set image.tag=latest
NAME: op
LAST DEPLOYED: Thu Oct 21 15:11:51 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

Confirm that the operator is running properly:

$ kubectl get pods
NAME READY STATUS RESTARTS AGE
op-operator-for-redis-64dbfb4b59-xjttw 1/1 Running 0 31s
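If the operator pod does not reach the Running state, its logs are the first place to look. Assuming the release name op used above, the deployment is named op-operator-for-redis:

$ kubectl logs deployment/op-operator-for-redis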

Deploy a Redis cluster

Install the node-for-redis Helm chart:

$ helm install --wait cluster charts/node-for-redis --set image.repository=node-for-redis --set image.tag=latest
NAME: cluster
LAST DEPLOYED: Thu Oct 21 15:12:05 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

Check the cluster status:

$ kubectl rc
POD NAME IP NODE ID ZONE USED MEMORY MAX MEMORY KEYS SLOTS
+ rediscluster-cluster-node-for-redis-2h92v 10.244.1.89 172.18.0.3 5606ea9ab09678124a4b17de10ab92a78aac0b4d dal13 35.55M 10.95G 5462-10923
| rediscluster-cluster-node-for-redis-nf24b 10.244.2.89 172.18.0.2 6840c0e5db16ebf073f57c67a6487c1c7f0d12d1 dal10 2.62M 10.95G
+ rediscluster-cluster-node-for-redis-4h4s2 10.244.2.88 172.18.0.2 8a2f2db39b85cf059e88dc80d7c9cafefac94de0 dal10 34.63M 10.95G 10924-16383
| rediscluster-cluster-node-for-redis-77bt2 10.244.3.87 172.18.0.4 74582d0e0cedb458e81f1e9d4f32cdc3f5e9399b dal12 2.60M 10.95G
+ rediscluster-cluster-node-for-redis-dh6pt 10.244.3.86 172.18.0.4 81f5c13bec9a0545a62de08b2a309a87d29855c7 dal12 2.83M 10.95G 0-5461
| rediscluster-cluster-node-for-redis-jnh2h 10.244.1.88 172.18.0.3 ffae381633377414597731597518529255fd9b69 dal13 2.64M 10.95G

NAME NAMESPACE PODS OPS STATUS REDIS STATUS NB PRIMARY REPLICATION ZONE SKEW
cluster-node-for-redis default 6/6/6 ClusterOK OK 3/3 1-1/1 0/0/BALANCED

Clean up your environment

Delete the Redis cluster:

$ helm uninstall cluster
release "cluster" deleted

Delete the Redis operator:

$ helm uninstall op
release "op" deleted

Ensure all pods have been deleted:

$ kubectl get pods
No resources found in default namespace.

Run end-to-end tests

If you followed the steps for creating a kind cluster with the e2e test configuration, then running the e2e tests is simple.

Build the required docker images:

make container PREFIX=ibmcom/ TAG=local
make container-node PREFIX=ibmcom/ TAG=new

Note that we need both local and new image tags for a rolling update e2e test case.

Load the required images into the kind cluster:

$ kind load docker-image ibmcom/operator-for-redis:local
$ kind load docker-image ibmcom/node-for-redis:local
$ kind load docker-image ibmcom/node-for-redis:new

Once the kind cluster is up and running, deploy the operator-for-redis Helm chart:

$ helm install op charts/operator-for-redis --wait --set image.repository=ibmcom/operator-for-redis --set image.tag=local
NAME: op
LAST DEPLOYED: Thu Oct 21 15:11:51 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

When the operator-for-redis pod is up and running, you can run the e2e regression tests:

$ go test -timeout 30m ./test/e2e --kubeconfig=$HOME/.kube/config --ginkgo.v --test.v
Running Suite: RedisCluster Suite
=================================
Random Seed: 1634845111
Will run 11 of 11 specs

Oct 21 15:38:31.261: INFO: KubeconfigPath-> "/Users/kscharm/.kube/config"
Oct 21 15:38:31.295: INFO: Check whether RedisCluster resource is registered...
RedisCluster CRUD operations
should create a RedisCluster

...

Ran 11 of 11 Specs in 517.299 seconds
SUCCESS! -- 11 Passed | 0 Failed | 0 Pending | 0 Skipped
PASS
ok github.com/IBM/operator-for-redis-cluster/test/e2e 517.776s
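Once you are completely done with the walkthrough and the e2e tests, you can optionally delete the local kind cluster as well:

$ kind delete cluster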
+ \ No newline at end of file diff --git a/index.html b/index.html index 6904e015..3aac6f00 100644 --- a/index.html +++ b/index.html @@ -4,13 +4,13 @@ Home | Operator for Redis Cluster - +
-

logo

Project status: alpha

This is an ongoing project.

The goal of this project is to simplify the deployment and management of a Redis cluster in a Kubernetes environment. It started internally at Amadeus in 2016, where it was initially designed to run on Openshift. This is the third version of the Redis operator, which leverages the Operator SDK framework for operators.

Overview

This project contains two Helm charts, namely operator-for-redis and node-for-redis. The first chart deploys the Redis operator, RedisCluster Custom Resource Definition (CRD), and various other k8s resources. The second chart deploys the RedisCluster resource and various other k8s resources. Each node in the Redis cluster runs in its own Pod. Upon startup, each node joins the cluster as a primary node with no slots. See the cluster representation in the diagram below:

Initial state

At this point, the Redis processes are running and each node is aware of the others, but only one primary holds all the slots. In order to properly configure each node in the cluster, we introduce the Operator for Redis Cluster.

The operator watches the RedisCluster CR that stores cluster configuration: number of primaries, replication factor (number of replicas per primary), and the pod template. Then the operator tries to apply this configuration to the set of Redis server processes. If the number of Redis servers doesn't match the provided configuration, the operator scales the number of pods to obtain the proper number of Redis nodes. The operator continuously reconciles the state of the cluster with the configuration stored in the RedisCluster CR until they match. To understand how the reconciliation loop works, see the Operator SDK docs.

Deployment

You can follow the cookbook to deploy the operator and a Redis cluster on a local Kubernetes cluster such as kind or minikube.

Environment requirements

The project started on Openshift, but it now supports Kubernetes as well. Please check the minimum environment version in the table below.

Environment    Version
Openshift      >= 3.7
Kubernetes     >= 1.7

Helm chart deployment

You can find two Helm charts in the charts folder:

  • operator-for-redis, used to deploy the operator in your Kubernetes cluster.
  • node-for-redis, used to create the RedisCluster CR that will be managed by the operator.

Operator deployment example:

helm install operator-for-redis charts/operator-for-redis
NAME: operator-for-redis
LAST DEPLOYED: Fri Aug 13 11:48:29 2021
NAMESPACE: default
STATUS: deployed

RESOURCES:
==> v1/Deployment
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
operator-for-redis 1 1 1 1 10s

Create the RedisCluster

You can configure the topology of the cluster by editing the provided values.yaml, using an override file, and/or setting each value with --set when you execute helm install.
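For example, a minimal override file might look like the following; the field names are the same ones used elsewhere in these docs, and the values shown are purely illustrative:

# my-values.yaml (illustrative)
numberOfPrimaries: 3
replicationFactor: 1
image:
  tag: latest

helm install node-for-redis charts/node-for-redis -f my-values.yaml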

Redis cluster deployment example:

helm install node-for-redis charts/node-for-redis
NAME: node-for-redis
LAST DEPLOYED: Fri Aug 13 11:48:29 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

! Warning ! If you want to use the Docker images corresponding to the code currently on the "main" branch, you need to set the image tag when you instantiate the node-for-redis and operator-for-redis charts. The "latest" tag corresponds to the last validated release.

helm install node-for-redis charts/node-for-redis --set image.tag=main-$COMMIT-dev

Install kubectl redis-cluster plugin

Docs available here.

Deployment from source code

Build container images

cd $GOPATH/src/github.com/IBM/operator-for-redis-cluster
make container

You can set the Docker image tag by adding the "TAG" variable:

make TAG=<Your-TAG> container

How to Release the Redis Operator

Do the following on the main branch:

  1. Create a tag on the commit you want to release
  2. Push the commit and tag
  3. GitHub Actions automation will build and push the Docker images and Helm charts with the release version

NOTE: If you need to test the build prior to the above steps, you can run: make build and resolve any issues.

How to Upgrade Redis Client Version

To upgrade your Redis client version, you will need to update the REDIS_VERSION variable in both the Dockerfile for the Redis node and the GitHub release workflow. Please note that upgrading the Redis client version may impact functionality because the operator depends on the radix library for executing Redis commands.

- +

+ \ No newline at end of file diff --git a/key-migration.html b/key-migration.html index cbdeec3b..fc1e0415 100644 --- a/key-migration.html +++ b/key-migration.html @@ -4,13 +4,13 @@ Key Migration | Operator for Redis Cluster - +
-

Key Migration

Overview

Key migration is the process by which Redis migrates keys from a source primary node to a destination primary node. The high-level steps for migrating keys can be found here. This feature allows users to better configure how keys are migrated, if at all.

Depending on the size of the Redis cluster, the key migration process can be time-consuming. For example, a cluster with thousands of gigabytes of data can take hours to migrate keys during a scaling or rolling update operation. To speed up the scaling process, we give users the option to migrate slots without keys and provide configuration to control batching.

Redis cluster migration configuration

Default

rollingUpdate:
  keyMigration: true
  keyBatchSize: 10000
  slotBatchSize: 16
  idleTimeoutMillis: 30000
  warmingDelayMillis: 0

scaling:
  keyBatchSize: 10000
  slotBatchSize: 16
  idleTimeoutMillis: 30000

The default configuration above contains two separate sections for configuring key migration: the rollingUpdate section determines how keys are migrated during rolling updates, and the scaling section determines how keys are migrated during scaling operations. The following definitions apply to both sections.

Definitions

keyMigration specifies whether to migrate keys during rolling updates. For most use cases, users will want to keep this value set to true. However, if you use the Redis cluster as a caching tool rather than a persistent database, you may want to consider setting it to false. When set to false, this feature transfers slots from the old Redis primary to the new primary without migrating keys. For large clusters, this can save a significant amount of time, with the tradeoff of temporarily increasing the number of requests to the backend. The increase in backend hit rate can be mitigated by modifying warmingDelayMillis. The next sections discuss the two different configuration options for key migration.

keyBatchSize determines the number of keys to get from a single slot during each migration iteration. By default, this value is 10000 keys.

slotBatchSize specifies the number of slots to migrate on each iteration. By default, this value is 16 slots. For most use cases, set this value to the number of logical CPUs. Usually, this is two times the CPU resource limits in the Redis operator deployment. See runtime.NumCPU for more information on how Go checks the number of available CPUs. Also, keep in mind that a Redis cluster has 16384 total slots, and those slots are evenly distributed across the primary nodes.

idleTimeoutMillis is the maximum idle time at any point during key migration. This means the migration should make progress without blocking for more than the specified number of milliseconds. See the Redis migrate command for more information about the timeout.

warmingDelayMillis is the amount of time in between each batch of slots. As the name suggests, it allows the new Redis node to warm its cache before moving on to the next node in the rolling update.
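Assuming these settings are exposed through the node-for-redis chart values in the same way as the other fields referenced in these docs, an override that disables key migration and paces the cache wipe might look like this (illustrative values only):

rollingUpdate:
  keyMigration: false
  slotBatchSize: 8
  warmingDelayMillis: 5000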

Rolling update key migration enabled - rollingUpdate.keyMigration: true

keyBatchSize: change this value depending on the total number of keys in your Redis cluster. Increasing this value can reduce the amount of time it takes to migrate keys by moving a larger number of keys per batch of slots.

slotBatchSize: increasing this value higher than the number of logical CPUs will have minimal effect on rolling update performance.

idleTimeoutMillis: do not modify this value unless you receive this specific error.

warmingDelayMillis: it's best to set this value to zero unless you want to introduce a delay between slot migrations. You can still set a non-zero delay if you want to reduce the overall strain that the migration calls place on the cluster and you do not care about how long the migration takes.

Examples

Assume all clusters have allocated 4 physical CPUs (8 logical CPUs) and 1 GB of memory for the Redis operator.

Given a small Redis cluster with 3 primaries, RF = 1, and a maximum memory of 1 GB per node, we have the following configuration:

rollingUpdate:              # For rolling updates,
  keyMigration: true        # Key migration is enabled
  keyBatchSize: 1000        # Migrate keys in batches of 1,000 per slot
  slotBatchSize: 8          # Transfer 8 slots on each migration iteration
  idleTimeoutMillis: 30000  # Wait up to 30 seconds for any delay in communication during the migration
  warmingDelayMillis: 0     # No delay between each batch of slots

scaling:                    # For scaling operations,
  keyBatchSize: 1000        # Migrate keys in batches of 1,000 per slot
  slotBatchSize: 8          # Transfer 8 slots on each migration iteration
  idleTimeoutMillis: 30000  # Wait up to 30 seconds for any delay in communication during the migration

Given a large Redis cluster with 20 primaries, RF = 1, and a maximum memory of 10 GB per node:

rollingUpdate:              # For rolling updates,
  keyMigration: true        # Key migration is enabled
  keyBatchSize: 10000       # Migrate keys in batches of 10,000 per slot
  slotBatchSize: 8          # Transfer 8 slots on each migration iteration
  idleTimeoutMillis: 30000  # Wait up to 30 seconds for any delay in communication during the migration
  warmingDelayMillis: 0     # No delay between each batch of slots

scaling:                    # For scaling operations,
  keyBatchSize: 10000       # Migrate keys in batches of 10,000 per slot
  slotBatchSize: 8          # Transfer 8 slots on each migration iteration
  idleTimeoutMillis: 30000  # Wait up to 30 seconds for any delay in communication during the migration

Observe how keyBatchSize is significantly greater than in the previous example because we have ten times the data per node.

Here we have a cluster with 10 primaries, RF = 1, and a maximum memory of 5 GB per node. We optimize for fast key migration on both rolling updates and scaling operations:

rollingUpdate:              # For rolling updates,
  keyMigration: true        # Key migration is enabled
  keyBatchSize: 50000       # Migrate keys in batches of 50,000 per slot
  slotBatchSize: 8          # Transfer 8 slots on each migration iteration
  idleTimeoutMillis: 30000  # Wait up to 30 seconds for any delay in communication during the migration
  warmingDelayMillis: 0     # No delay between each batch of slots

scaling:                    # For scaling operations,
  keyBatchSize: 50000       # Migrate keys in batches of 50,000 per slot
  slotBatchSize: 8          # Transfer 8 slots on each migration iteration
  idleTimeoutMillis: 30000  # Wait up to 30 seconds for any delay in communication during the migration

Rolling update key migration disabled - rollingUpdate.keyMigration: false

keyBatchSize: not used when no keys are migrated.

slotBatchSize: depends on how quickly you want to wipe the cache. You can use a smaller slotBatchSize and increase warmingDelayMillis to make this process go slower. If your backend can handle a larger increase in requests, you can set warmingDelayMillis to something small or even zero.

idleTimeoutMillis: not used when no keys are migrated.

warmingDelayMillis: increasing this value will ease the strain on your backend as slot ownership transfers during a rolling update by pausing after migrating a single batch of slots.

Please be sure to properly configure rollingUpdate based on your Redis cluster if you plan on using this configuration. Setting too small a value for warmingDelayMillis will quickly wipe all the keys in your cluster without yielding sufficient time for new Redis nodes to warm up.

Examples

Given a small Redis cluster with 3 primaries, RF = 1, and a maximum memory of 1 GB per node, we have the following configuration:

rollingUpdate:              # For rolling updates,
  keyMigration: false       # Key migration is disabled
  slotBatchSize: 8          # Transfer 8 slots on each migration iteration
  warmingDelayMillis: 1000  # Wait 1 second between each batch of slots

scaling:                    # For scaling operations,
  keyBatchSize: 10000       # Migrate keys in batches of 10,000 per slot
  slotBatchSize: 8          # Transfer 8 slots on each migration iteration
  idleTimeoutMillis: 30000  # Wait up to 30 seconds for any delay in communication during the migration

Given a large Redis cluster with 20 primaries, RF = 1, and a maximum memory of 10 GB per node:

rollingUpdate:               # For rolling updates,
  keyMigration: false        # Key migration is disabled
  slotBatchSize: 8           # Transfer 8 slots on each migration iteration
  warmingDelayMillis: 10000  # Wait 10 seconds between each batch of slots

scaling:                     # For scaling operations,
  keyBatchSize: 10000        # Migrate keys in batches of 10,000 per slot
  slotBatchSize: 8           # Transfer 8 slots on each migration iteration
  idleTimeoutMillis: 30000   # Wait up to 30 seconds for any delay in communication during the migration

Here we have a cluster with 10 primaries, RF = 1, and a maximum memory of 5 GB per node. We want to wipe the cache as quickly as possible on rolling updates, while also maintaining fast key migration on scaling operations:

rollingUpdate:              # For rolling updates,
  keyMigration: false       # Key migration is disabled
  slotBatchSize: 8          # Transfer 8 slots on each migration iteration
  warmingDelayMillis: 0     # No delay between each batch of slots

scaling:                    # For scaling operations,
  keyBatchSize: 50000       # Migrate keys in batches of 50,000 per slot
  slotBatchSize: 8          # Transfer 8 slots on each migration iteration
  idleTimeoutMillis: 30000  # Wait up to 30 seconds for any delay in communication during the migration
- +

+ \ No newline at end of file diff --git a/kubectl-plugin.html b/kubectl-plugin.html index 322cd31a..22a0b433 100644 --- a/kubectl-plugin.html +++ b/kubectl-plugin.html @@ -4,13 +4,13 @@ Kubectl Plugin | Operator for Redis Cluster - +
-

Kubectl Plugin

The Redis Operator kubectl plugin helps you visualise the status of your Redis cluster. Please visit the official documentation for more details.

Installation

By default, the plugin will install in ~/.kube/plugins.

Run make plugin to install the plugin. After installation is complete, add the plugin to your PATH so kubectl can find it. By default, the plugin is installed to $HOME/.kube/plugins/rediscluster.
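For example, if you keep the default location, you can append the plugin directory to your PATH (assuming the plugin executable lives in that directory):

export PATH="$PATH:$HOME/.kube/plugins/rediscluster"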

Alternatively, you can download the plugin manually from the assets tab on the releases page.

Usage

Example usage:

kubectl rc
POD NAME IP NODE ID ZONE USED MEMORY MAX MEMORY KEYS SLOTS
+ rediscluster-rc1-node-for-redis-7jl8q 172.30.255.112 10.183.176.60 5478771ba4c34dbad9df8d30ac4bec5c9ba0842e wdc04 1023.75M 1.00G db0=669808 2731-5461
| rediscluster-rc1-node-for-redis-7q9jn 172.30.68.235 10.191.41.164 15c388f164ad0946691482c7de72939848ca86e2 wdc07 1023.76M 1.00G db0=669808
| rediscluster-rc1-node-for-redis-wjkk4 172.30.255.169 10.188.125.58 442de26f8cc9a011df307932a683177a5cd034a9 wdc06 1023.77M 1.00G db0=669808
+ rediscluster-rc1-node-for-redis-bmrgm 172.30.217.164 10.183.176.51 dd59697e82edf1554468f239f63ea1efd6718d4b wdc04 1023.78M 1.00G db0=669675 5462-8192
| rediscluster-rc1-node-for-redis-7lw4l 172.30.61.98 10.188.125.33 fc5718a7e9bd963f80b9cbd786bbc02d80b7f191 wdc06 1023.78M 1.00G db0=669675
| rediscluster-rc1-node-for-redis-qtzx8 172.30.188.104 10.191.41.140 8cd31d550d6935868d5da12ab78ddfb8c6fea1a2 wdc07 1023.78M 1.00G db0=669675
+ rediscluster-rc1-node-for-redis-dbrmg 172.30.140.228 10.183.176.53 c56420993afd35596425ae9e10a7e902cad5b6f8 wdc04 1023.78M 1.00G db0=670310 13655-16383
| rediscluster-rc1-node-for-redis-f4mxd 172.30.68.236 10.191.41.164 b3356e03f83227832662f1bc3bae50273e99059b wdc07 1023.82M 1.00G db0=670310
| rediscluster-rc1-node-for-redis-hhzdx 172.30.255.170 10.188.125.58 cc7bb986e42dd2fe9ead405e39a1a559b1b86e71 wdc06 1023.77M 1.00G db0=670310
+ rediscluster-rc1-node-for-redis-srxrs 172.30.188.105 10.191.41.140 31e880eef26377e719b28894a8fa469939b05a98 wdc07 1023.79M 1.00G db0=669522 10924-13654
| rediscluster-rc1-node-for-redis-hg6q4 172.30.255.113 10.183.176.60 bd9cd001e5bdcba527277e39f63908ea18504f75 wdc04 1023.80M 1.00G db0=669522
| rediscluster-rc1-node-for-redis-zgmqk 172.30.16.231 10.188.125.3 caaac384a8cd2cb6181925c889557bdb028aec0e wdc06 1023.81M 1.00G db0=669522
+ rediscluster-rc1-node-for-redis-szb9x 172.30.61.97 10.188.125.33 07de0c67262e816f7792f0a68c4c4a5b47291f46 wdc06 1023.80M 1.00G db0=669743 0-2730
| rediscluster-rc1-node-for-redis-gvkb4 172.30.217.165 10.183.176.51 f1ff1e5c0a77cb7b5850d8808f950c1303f96056 wdc04 1023.76M 1.00G db0=669743
| rediscluster-rc1-node-for-redis-znn4x 172.30.67.94 10.191.41.163 e0ab667b6629a660b01c20574750e3a487c69a1b wdc07 1023.78M 1.00G db0=669743
+ rediscluster-rc1-node-for-redis-xf8mr 172.30.67.95 10.191.41.163 8e9fe3e022f63f5fcc2a117edaadd93e39bfbb27 wdc07 1023.78M 1.00G db0=669711 8193-10923
| rediscluster-rc1-node-for-redis-r2qd2 172.30.16.232 10.188.125.3 0da8f5799b86c40f7ea0c3ebf71ce3955afa1dd1 wdc06 1023.79M 1.00G db0=669711
| rediscluster-rc1-node-for-redis-zfsxt 172.30.140.229 10.183.176.53 b4c22806c4c2529713e6e05b0d6fd994f0e801c7 wdc04 1023.78M 1.00G db0=669711

POD NAME IP NODE ID ZONE USED MEMORY MAX MEMORY KEYS SLOTS
+ rediscluster-rc2-node-for-redis-57g56 172.30.68.237 10.191.41.164 cc82878c2bdd6381d569709334db86ef6cfade19 wdc07 175.67M 1.00G db0=113397 5462-10923
+ rediscluster-rc2-node-for-redis-942rt 172.30.140.230 10.183.176.53 291f2f06d13a1d821219bc9acfbd5323be1552d9 wdc04 174.82M 1.00G db0=112681 10924-16383
+ rediscluster-rc2-node-for-redis-gqmhs 172.30.16.233 10.188.125.3 85f3c1c6652b5fb76b40c8bdffe3a9af57f22d4d wdc06 175.34M 1.00G db0=113014 0-5461

NAME NAMESPACE PODS OPS STATUS REDIS STATUS NB PRIMARY REPLICATION
rc1-node-for-redis default 18/18/18 ClusterOK OK 6/6 2-2/2
rc2-node-for-redis default 3/3/3 ClusterOK OK 3/3 0-0/0

The example above illustrates the Redis cluster plugin's output when two clusters are being managed by one Redis operator. The top portion of the output is referred to as the Redis cluster's status, and the bottom portion as the Redis cluster's state.

Redis Cluster Status

The Redis cluster status provides an easy way to see how your cluster is set up and which replicas belong to which primary. Without the cluster status, it is difficult to determine in which ZONE a Redis pod resides. The POD NAME, IP, and ID fields help when parsing logs because these properties are used differently depending on what part of the code is logging messages. The USED MEMORY and MAX MEMORY fields help determine if you should be seeing evictions, while the KEYS and SLOTS fields can help determine if you have any hot spots in your cluster.

Redis Cluster Status Fields

Field          Description
POD NAME       Name of Redis pod where the first character indicates its role. See the legend for more details
IP             Internal IP address of the Redis pod
NODE           IP address of the worker node on which the pod has been scheduled
ID             Redis ID of the pod
ZONE           Zone of the worker node on which the pod has been scheduled
USED MEMORY    Human-readable representation of the total number of bytes allocated by Redis using its allocator
MAX MEMORY     Human-readable representation of Redis' maxmemory configuration directive
KEYS           List of the number of keys in each database using the form db0:db0_key_count,db1:db1_key_count
SLOTS          List of the slot ranges owned by this primary

Redis Cluster Pod Role Prefix Legend

Prefix    Description
+         Primary
|         Replica to the primary above
?         Pod that matches the label selector but is not a part of the cluster yet
^         Pod that is currently joining the cluster but has yet to be assigned a role

Redis Cluster State

The Redis cluster state provides additional information that is useful when trying to determine the cluster's health. It also indicates if a scaling, rebalancing, or rolling update operation is occurring.

Redis Cluster State Fields

Field         Description
NAME          Redis cluster name
NAMESPACE     Namespace of Redis cluster deployment
OPS STATUS    ClusterOK | Scaling | Rebalancing | RollingUpdate
REDIS STATUS  OK | KO | Scaling | Calculating Rebalancing | Rebalancing | RollingUpdate
PODS          Current number of ready pods / current number of total pods / desired number of pods
NB PRIMARY    Current number of primaries / desired number of primaries
REPLICATION   Current min RF - current max RF / desired RF
ZONE SKEW     Primary node zone skew / replica node zone skew / BALANCED | UNBALANCED
- +

+ \ No newline at end of file diff --git a/rolling-update.html b/rolling-update.html index 47be70b1..8807f0ac 100644 --- a/rolling-update.html +++ b/rolling-update.html @@ -4,13 +4,13 @@ Rolling Update Procedure | Operator for Redis Cluster - +
-

Rolling Update Procedure

Overview

In production, developers aim for zero downtime when periodically deploying newer versions of their application. Per Kubernetes documentation:

rolling updates allow Deployments' update to take place with zero downtime by incrementally updating Pods instances with new ones

To learn more about how rolling updates work in k8s, see Performing a Rolling Update.

Redis cluster upgrades

A rolling update occurs when the user applies a change to the Redis cluster pod template spec. For example, a user might update the Redis cluster pod image tag in charts/node-for-redis/values.yaml and run helm upgrade. When the Redis operator detects the pod template spec change, the following procedure takes place:

  1. Compare the number of running Redis pods with the number of pods required for the rolling update:
    # migration pods = 1 + replication factor
    # required pods = # primaries x # migration pods
    # pods to create = # required pods + # migration pods - # of running pods
    where # migration pods is the number of pods needed to migrate one primary and all of its replicas, # required pods is the total number of pods required for the cluster, and # pods to create is the number of pods to create on a single rolling update iteration (see the worked example after this list).
  2. If # pods to create > 0, create additional pods with the new pod template spec.
  3. Separate old nodes and new nodes according to their pod spec hash annotation.
  4. Select the old primary node to replace with one of the newly created pods.
  5. Generate the primary to replicas mapping for the newly created pods.
  6. Attach the new replicas to the new primary.
  7. Migrate slots (and by default, keys) from the old primary to the new primary.
  8. Detach, forget, and delete the old pods.
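As a concrete illustration of the pod math in step 1: with 3 primaries and a replication factor of 1, # migration pods = 1 + 1 = 2, # required pods = 3 x 2 = 6, and with all 6 pods already running the first iteration creates 6 + 2 - 6 = 2 new pods.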

The Redis cluster rolling update procedure ensures that there is no downtime as new nodes replace old ones. However, because the migration of keys from old primaries to new ones is a time intensive operation, you may see a temporary decrease in the performance of your cluster during this process. To learn more about step 7, see key migration.

Resource limitations

This procedure requires additional resources beyond what is normally allocated to the Redis cluster. More specifically, this procedure creates an extra 1 + replication factor pods on each rolling update iteration, so you will need to ensure that you have allocated sufficient resources. For standard configurations that allow multiple pods per node, you may need to increase memory + cpu on your existing nodes. If you have configured your cluster topology to limit one Redis pod per k8s node, you may need to increase the number of k8s nodes in your worker pool.

In the case where there are insufficient resources to schedule new Redis pods, the pods will get stuck in Pending state. This state is difficult to recover from because the Redis operator will continue to apply the rolling update procedure until it completes. If you find your newly created pods are in Pending state, increase the allocated memory + cpus.

- +

+ \ No newline at end of file diff --git a/scaling.html b/scaling.html index bbc0c3b3..5d50c57d 100644 --- a/scaling.html +++ b/scaling.html @@ -4,13 +4,13 @@ Scaling Operations | Operator for Redis Cluster - +
-

Scaling Operations

Overview

There are many reasons why you would want to scale the number of Redis nodes in your cluster. A few of the most common reasons are:

  • Memory pressure - the nodes in your cluster are close to full capacity (or are at full capacity and evictions are causing the backend to take more traffic than desired)
    • Horizontally scale the number of primaries to better serve requests
    • Vertically scale your current Redis nodes by allocating more memory
  • CPU bottleneck - throughput is low and impacting system performance
    • Horizontally scale the number of primaries to better serve requests
    • Vertically scale your current Redis nodes by allocating more CPUs
  • Over-provisioning - you have allocated too many resources for your cluster
    • Scale down if it does not hurt the performance of your system
    • Scale down the number of primaries to save on costs
    • If you are running a Redis cluster with a high replication factor (RF), consider reducing it
    • In multi-zone clusters, scaling down may reduce availability in the case of a zone outage

Impact of scaling

Scaling operations happen in real-time while the Redis cluster receives requests. They are computationally intensive, so expect a decrease in performance while the scaling operation takes place. The extent of the performance impact depends on the size of the data stored in Redis, as well as CPU utilization. See key migration for more information about how Redis keys are migrated from one primary to another during the scaling process.

Resource Requirements

Like the rolling update procedure, scaling up requires additional resources to create new Redis pods. In the case where there are insufficient resources to schedule new Redis pods, the pods will get stuck in Pending state. If you find your newly created pods are in Pending state, increase the memory + cpu allocated to your k8s nodes, or add more nodes to your worker pool.

Scaling primaries

The first option for scaling your cluster is scaling the number of primaries. You can trigger a scaling operation by modifying the numberOfPrimaries field in charts/node-for-redis/values.yaml and running helm upgrade on your cluster.

Scaling up

Scale up operations take place when the desired number of primaries is greater than the current number of primaries. We take the following actions for scale up operations:

For the number of primaries added,
1. Create a new Redis pod.
2. Wait for the pod to become ready.

Once the number of desired Redis pods matches the current number of running pods,
3. Check if we have sufficient primaries. If not, promote replicas to primaries. This only happens when you scale up the number of primaries AND scale down RF.
4. Add the new Redis nodes to the selection of current primaries.
5. Place and attach replicas to their respective primaries.
6. Dispatch slots and migrate keys to the new primaries.

After this last step, your cluster will be in normal operating state. The primary nodes will have an equal number of slots and replica nodes will be properly attached.

Example

Given a Redis cluster with the following config:

numberOfPrimaries: 3
replicationFactor: 1

Assuming your helm release name is redis-cluster, scale up numberOfPrimaries by running the following:

helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=5
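While the operation is in progress, you can watch it with the kubectl plugin described in these docs; the OPS STATUS column should report Scaling until the cluster returns to ClusterOK:

kubectl rc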

Scaling down

Scale down operations take place when the desired number of primaries is less than the current number of primaries. We take the following actions for scale down operations:

For the number of primaries deleted,
1. Select one primary to remove.
2. Migrate keys from the primary to be removed to the other primaries. Slots are equally distributed across the remaining primaries.
3. Detach, forget, and delete the primary to be removed.

Once the number of desired Redis pods matches the current number of running pods,
4. Dispatch slots and migrate keys to the new primaries.
5. Place and attach replicas to their respective primaries.

After this last step, your cluster will be in normal operating state.

Example

Given a Redis cluster with the following config:

numberOfPrimaries: 5
replicationFactor: 1

Scale down numberOfPrimaries by running the following:

helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=3

Scaling replication factor

The second option for scaling your cluster is scaling RF. You can trigger a scaling operation by modifying the replicationFactor field in charts/node-for-redis/values.yaml and running helm upgrade on your cluster.

Scaling up

Scale up operations for RF take place when the desired RF is greater than the current RF. We take the following actions for scale up operations:

For the number of replicas added,
1. Create a new Redis pod.
2. Wait for the pod to become ready.

Once the number of desired Redis pods matches the current number of running pods,
3. Add the new Redis nodes to the selection of replicas.
4. Place and attach replicas to their respective primaries such that each primary has the same number of replicas.
5. Dispatch slots and migrate keys to the new primaries.

After this step, your cluster will be in normal operating state.

Example

Given a Redis cluster with the following config:

numberOfPrimaries: 3
replicationFactor: 1

Scale up replicationFactor by running the following:

helm upgrade redis-cluster charts/node-for-redis --set replicationFactor=2

Scaling down

Scale down operations for RF take place when the desired RF is less than the current RF. We take the following actions for scale down operations:

For the number of replicas deleted,
For each primary in the cluster,
1. Calculate the difference between the current RF and desired RF.
2. If we do not have sufficient replicas for this primary, select new replicas and attach them to the primary.
3. If we have too many replicas, select replicas to delete. Detach, forget, and delete the replica to be removed.

Once the number of desired Redis pods matches the current number of running pods,
4. Place and attach replicas to their respective primaries.

After this step, your cluster will be in normal operating state.

Example

Given a Redis cluster with the following config:

numberOfPrimaries: 3
replicationFactor: 2

Scale down replicationFactor by running the following:

helm upgrade redis-cluster charts/node-for-redis --set replicationFactor=1

Scaling primaries and replication factor

You may scale both the number of primaries and replication factor in a single helm upgrade command. The number of pods created or deleted will be calculated and actions will be taken according to the algorithms described in the previous sections. The following is an example of scaling up numberOfPrimaries and replicationFactor.

Example 1

Given a Redis cluster with the following config:

numberOfPrimaries: 3
replicationFactor: 1

Increase numberOfPrimaries and replicationFactor by running the following:

helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=4 --set replicationFactor=2

Example 2

You may also scale up one field while scaling down the other:

numberOfPrimaries: 4
replicationFactor: 2

Increase numberOfPrimaries and decrease replicationFactor by running the following:

helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=5 --set replicationFactor=1
- +

Scaling Operations

Overview

There are many reasons why you would want to scale the number of Redis nodes in your cluster. A few of the most common reasons are:

  • Memory pressure - the nodes in your cluster are close to full capacity (or are at full capacity and evictions are causing the backend to take more traffic than desired)
    • Horizontally scale the number of primaries to better serve requests
    • Vertically scale your current Redis nodes by allocating more memory
  • CPU bottleneck - throughput is low and impacting system performance
    • Horizontally scale the number of primaries to better serve requests
    • Vertically scale your current Redis nodes by allocating more CPUs
  • Over-provisioning - you have allocated too many resources for your cluster
    • Scale down if it does not hurt the performance of your system
    • Scale down the number of primaries to save on costs
    • If you are running a Redis cluster with a high replication factor (RF), consider reducing it
    • In multi-zone clusters, scaling down may reduce availability in the case of a zone outage

Impact of scaling

Scaling operations happen in real-time while the Redis cluster receives requests. They are computationally intensive, so expect a decrease in performance while the scaling operation takes place. The extent of the performance impact depends on the size of the data stored in Redis, as well as CPU utilization. See key migration for more information about how Redis keys are migrated from one primary to another during the scaling process.
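
If you want a rough read on the performance impact while keys are migrating, one option is to sample latency from inside a Redis pod. The pod name below is hypothetical, and this assumes redis-cli is available in the node image:

kubectl exec -it redis-cluster-node-0 -- redis-cli --latency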

Resource Requirements

Like the rolling update procedure, scaling up requires additional resources to create new Redis pods. If there are insufficient resources to schedule the new Redis pods, they will get stuck in the Pending state. If you find your newly created pods are stuck in Pending, increase the memory and CPU allocated to your k8s nodes, or add more nodes to your worker pool.
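
For example, you can list any pods stuck in Pending in the release namespace with a standard field selector:

kubectl get pods --field-selector=status.phase=Pending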

Scaling primaries

The first option for scaling your cluster is scaling the number of primaries. You can trigger a scaling operation by modifying the numberOfPrimaries field in charts/node-for-redis/values.yaml and running helm upgrade on your cluster.
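
As an alternative to editing charts/node-for-redis/values.yaml in place, you can keep the override in a separate values file and pass it to helm upgrade. The file name and release name here follow the examples below and are only illustrative:

# scaling-values.yaml
numberOfPrimaries: 5
replicationFactor: 1

helm upgrade redis-cluster charts/node-for-redis -f scaling-values.yaml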

Scaling up

Scale up operations take place when the desired number of primaries is greater than the current number of primaries. We take the following actions for scale up operations:

For the number of primaries added,
1. Create a new Redis pod.
2. Wait for the pod to become ready.

Once the number of desired Redis pods matches the current number of running pods,
3. Check if we have sufficient primaries. If not, promote replicas to primaries. This only happens when you scale up the number of primaries AND scale down RF.
4. Add the new Redis nodes to the selection of current primaries.
5. Place and attach replicas to their respective primaries.
6. Dispatch slots and migrate keys to the new primaries.

After this last step, your cluster will be in normal operating state. The primary nodes will have an equal number of slots, and replica nodes will be properly attached.
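
One way to confirm the slots were rebalanced and replicas attached is to run the cluster check against any node. The pod name is hypothetical, and this assumes redis-cli is available in the node image:

kubectl exec -it redis-cluster-node-0 -- redis-cli --cluster check 127.0.0.1:6379

The output lists each primary with its slot count and number of replicas, so an even split indicates the dispatch completed.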

Example

Given a Redis cluster with the following config:

numberOfPrimaries: 3
replicationFactor: 1

Assuming your helm release name is redis-cluster, scale up numberOfPrimaries by running the following:

helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=5
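
To follow the new pods as they are created and become ready, you can watch the pod list in the release namespace:

kubectl get pods -w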

Scaling down

Scale down operations take place when the desired number of primaries is less than the current number of primaries. We take the following actions for scale down operations:

For the number of primaries deleted,
1. Select one primary to remove.
2. Migrate keys from the primary to be removed to the other primaries. Slots are equally distributed across the remaining primaries.
3. Detach, forget, and delete the primary to be removed.

Once the number of desired Redis pods matches the current number of running pods,
4. Dispatch slots and migrate keys to the new primaries.
5. Place and attach replicas to their respective primaries.

After this last step, your cluster will be in normal operating state.
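
To confirm the removed primaries are gone, you can list the node table from any surviving pod (hypothetical pod name, assuming redis-cli is available in the node image):

kubectl exec -it redis-cluster-node-0 -- redis-cli cluster nodes

Forgotten nodes should no longer appear in this listing.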

Example

Given a Redis cluster with the following config:

numberOfPrimaries: 5
replicationFactor: 1

Scale down numberOfPrimaries by running the following:

helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=3

Scaling replication factor

The second option for scaling your cluster is scaling RF. You can trigger a scaling operation by modifying the replicationFactor field in charts/node-for-redis/values.yaml and running helm upgrade on your cluster.

Scaling up

Scale up operations for RF take place when the desired RF is greater than the current RF. We take the following actions for scale up operations:

For the number of replicas added,
1. Create a new Redis pod.
2. Wait for the pod to become ready.

Once the number of desired Redis pods matches the current number of running pods,
3. Add the new Redis nodes to the selection of replicas.
4. Place and attach replicas to their respective primaries such that each primary has the same number of replicas.
5. Dispatch slots and migrate keys to the new primaries.

After this step, your cluster will be in normal operating state.
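
A quick way to check the replica count on a given primary is the replication section of INFO (hypothetical pod name, assuming redis-cli is available in the node image):

kubectl exec -it redis-cluster-node-0 -- redis-cli info replication

On a primary, the output includes role:master and connected_slaves, which should match the new replicationFactor.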

Example

Given a Redis cluster with the following config:

numberOfPrimaries: 3
replicationFactor: 1

Scale up replicationFactor by running the following:

helm upgrade redis-cluster charts/node-for-redis --set replicationFactor=2

Scaling down

Scale down operations for RF take place when the desired RF is less than the current RF. We take the following actions for scale down operations:

For the number of replicas deleted, for each primary in the cluster,
1. Calculate the difference between the current RF and desired RF.
2. If we do not have sufficient replicas for this primary, select new replicas and attach them to the primary.
3. If we have too many replicas, select the replicas to delete. Detach, forget, and delete each replica to be removed.

Once the number of desired Redis pods matches the current number of running pods,
4. Place and attach replicas to their respective primaries.

After this step, your cluster will be in normal operating state.

Example

Given a Redis cluster with the following config:

numberOfPrimaries: 3
replicationFactor: 2

Scale down replicationFactor by running the following:

helm upgrade redis-cluster charts/node-for-redis --set replicationFactor=1

Scaling primaries and replication factor

You may scale both the number of primaries and replication factor in a single helm upgrade command. The number of pods created or deleted will be calculated and actions will be taken according to the algorithms described in the previous sections. The following is an example of scaling up numberOfPrimaries and replicationFactor.
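
As a rough sizing guide, and assuming each primary keeps exactly replicationFactor replicas, the total number of Redis pods is numberOfPrimaries * (1 + replicationFactor). Under that assumption, Example 1 below grows the cluster from 3 * (1 + 1) = 6 pods to 4 * (1 + 2) = 12 pods, so the operator creates 6 new pods before rebalancing.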

Example 1

Given a Redis cluster with the following config:

numberOfPrimaries: 3
replicationFactor: 1

Increase numberOfPrimaries and replicationFactor by running the following:

helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=4 --set replicationFactor=2

Example 2

You may also scale up one field while scaling down the other:

numberOfPrimaries: 4
replicationFactor: 2

Increase numberOfPrimaries and decrease replicationFactor by running the following:

helm upgrade redis-cluster charts/node-for-redis --set numberOfPrimaries=5 --set replicationFactor=1
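
Under the same sizing assumption, Example 2 goes from 4 * (1 + 2) = 12 pods to 5 * (1 + 1) = 10 pods. Because the number of primaries increases while RF decreases, this is also the case described in the scale-up steps above where an existing replica may be promoted to a primary to reach the desired count.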