diff --git a/404.html b/404.html
index 134f0efb9b..5860ccc0b8 100644
--- a/404.html
+++ b/404.html
@@ -4,7 +4,7 @@
 The Home of Redis Developers
-
+
@@ -12,7 +12,7 @@
 Skip to main content
- + \ No newline at end of file diff --git a/assets/js/470c62dd.c6d06eca.js b/assets/js/470c62dd.b9f6a93b.js similarity index 99% rename from assets/js/470c62dd.c6d06eca.js rename to assets/js/470c62dd.b9f6a93b.js index 6bdf0f5e17..f1daf9e457 100644 --- a/assets/js/470c62dd.c6d06eca.js +++ b/assets/js/470c62dd.b9f6a93b.js @@ -1 +1 @@ -"use strict";(self.webpackChunkredis_developer_hub=self.webpackChunkredis_developer_hub||[]).push([[2801],{85162:(e,t,a)=>{a.d(t,{Z:()=>r});var i=a(67294),s=a(86010);const n="tabItem_Ymn6";function r(e){let{children:t,hidden:a,className:r}=e;return i.createElement("div",{role:"tabpanel",className:(0,s.Z)(n,r),hidden:a},t)}},65488:(e,t,a)=>{a.d(t,{Z:()=>h});var i=a(87462),s=a(67294),n=a(86010),r=a(72389),o=a(67392),l=a(7094),d=a(12466);const c="tabList__CuJ",p="tabItem_LNqP";function u(e){var t;const{lazy:a,block:r,defaultValue:u,values:h,groupId:m,className:v}=e,g=s.Children.map(e.children,(e=>{if((0,s.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),A=h??g.map((e=>{let{props:{value:t,label:a,attributes:i}}=e;return{value:t,label:a,attributes:i}})),k=(0,o.l)(A,((e,t)=>e.value===t.value));if(k.length>0)throw new Error(`Docusaurus error: Duplicate values "${k.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const f=null===u?u:u??(null==(t=g.find((e=>e.props.default)))?void 0:t.props.value)??g[0].props.value;if(null!==f&&!A.some((e=>e.value===f)))throw new Error(`Docusaurus error: The has a defaultValue "${f}" but none of its children has the corresponding value. Available values are: ${A.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:b,setTabGroupChoices:y}=(0,l.U)(),[w,E]=(0,s.useState)(f),N=[],{blockElementScrollPositionUntilNextRender:Z}=(0,d.o5)();if(null!=m){const e=b[m];null!=e&&e!==w&&A.some((t=>t.value===e))&&E(e)}const x=e=>{const t=e.currentTarget,a=N.indexOf(t),i=A[a].value;i!==w&&(Z(t),E(i),null!=m&&y(m,String(i)))},J=e=>{var t;let a=null;switch(e.key){case"Enter":x(e);break;case"ArrowRight":{const t=N.indexOf(e.currentTarget)+1;a=N[t]??N[0];break}case"ArrowLeft":{const t=N.indexOf(e.currentTarget)-1;a=N[t]??N[N.length-1];break}}null==(t=a)||t.focus()};return s.createElement("div",{className:(0,n.Z)("tabs-container",c)},s.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,n.Z)("tabs",{"tabs--block":r},v)},A.map((e=>{let{value:t,label:a,attributes:r}=e;return s.createElement("li",(0,i.Z)({role:"tab",tabIndex:w===t?0:-1,"aria-selected":w===t,key:t,ref:e=>N.push(e),onKeyDown:J,onClick:x},r,{className:(0,n.Z)("tabs__item",p,null==r?void 0:r.className,{"tabs__item--active":w===t})}),a??t)}))),a?(0,s.cloneElement)(g.filter((e=>e.props.value===w))[0],{className:"margin-top--md"}):s.createElement("div",{className:"margin-top--md"},g.map(((e,t)=>(0,s.cloneElement)(e,{key:t,hidden:e.props.value!==w})))))}function h(e){const t=(0,r.Z)();return s.createElement(u,(0,i.Z)({key:String(t)},e))}},71131:(e,t,a)=>{a.d(t,{Z:()=>m});var i=a(67294),s=a(3905),n=a(52195);const r="riContainer_bco2",o="riDescriptionShort_E27B",l="riDetail_wzFs",d="riIcon_yDou",c="riTitle_x6vI",p="riDescription_RDnu",u="riMore_apRP";var h=a(86010);const m=e=>{const[t,a]=i.useState(!1);return i.createElement("a",{href:e.page,className:r},i.createElement("div",{className:o},i.createElement("div",{className:d},i.createElement("span",{className:"fe fe-zap"})),i.createElement("div",{className:l},i.createElement("div",{className:c},i.createElement("a",{href:e.page},e.title)),i.createElement("div",{className:p},e.description,i.Children.count(e.children)>0&&i.createElement("span",{className:(0,h.Z)(u,"fe","fe-more-horizontal"),onClick:()=>a(!t)})))),t&&i.createElement("div",{className:"ri-description-long"},i.createElement(s.Zo,{components:n.Z},e.children)))}},14020:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>l,contentTitle:()=>r,default:()=>p,frontMatter:()=>n,metadata:()=>o,toc:()=>d});var i=a(87462),s=(a(67294),a(3905));a(65488),a(85162),a(44996),a(71131);const n={id:"index-gettingstarted",title:"Java and Redis",sidebar_label:"Overview",slug:"/develop/java/getting-started"},r=void 0,o={unversionedId:"develop/java/getting-started/index-gettingstarted",id:"develop/java/getting-started/index-gettingstarted",title:"Java and Redis",description:"Find tutorials, examples and technical articles that will help you to develop with Redis and Java.",source:"@site/docs/develop/java/getting-started/index.md",sourceDirName:"develop/java/getting-started",slug:"/develop/java/getting-started",permalink:"/develop/java/getting-started",draft:!1,editUrl:"https://github.com/redis-developer/redis-developer/edit/master/docs/develop/java/getting-started/index.md",tags:[],version:"current",lastUpdatedAt:1698353241,formattedLastUpdatedAt:"Oct 26, 2023",frontMatter:{id:"index-gettingstarted",title:"Java and Redis",sidebar_label:"Overview",slug:"/develop/java/getting-started"}},l={},d=[{value:"Getting Started",id:"getting-started",level:2},{value:"Run a Redis server",id:"run-a-redis-server",level:3},{value:"Using Jedis",id:"using-jedis",level:3},{value:"Step 1. 
Add dependencies Jedis dependency to your Maven (or Gradle) project file:",id:"step-1-add-dependencies-jedis-dependency-to-your-maven-or-gradle-project-file",level:3},{value:"Step 2. Import the required classes",id:"step-2-import-the-required-classes",level:3},{value:"Step 3. Create a Connection Pool",id:"step-3-create-a-connection-pool",level:3},{value:"Step 4. Write your application code",id:"step-4-write-your-application-code",level:3},{value:"Redis Launchpad",id:"redis-launchpad",level:3},{value:"Movie Database app in Java",id:"movie-database-app-in-java",level:4},{value:"Leaderboard app in Java",id:"leaderboard-app-in-java",level:4},{value:"Ecosystem",id:"ecosystem",level:3},{value:"Develop with Spring",id:"develop-with-spring",level:4},{value:"Develop with Quarkus",id:"develop-with-quarkus",level:4},{value:"Develop with Vert.x",id:"develop-with-vertx",level:4},{value:"Develop with Micronaut",id:"develop-with-micronaut",level:4},{value:"More developer resources",id:"more-developer-resources",level:3},{value:"Redis University",id:"redis-university",level:3},{value:"Redis for Java Developers",id:"redis-for-java-developers",level:3}],c={toc:d};function p(e){let{components:t,...n}=e;return(0,s.kt)("wrapper",(0,i.Z)({},c,n,{components:t,mdxType:"MDXLayout"}),(0,s.kt)("p",null,"Find tutorials, examples and technical articles that will help you to develop with Redis and Java."),(0,s.kt)("h2",{id:"getting-started"},"Getting Started"),(0,s.kt)("p",null,"Java community has built many client libraries that you can find ",(0,s.kt)("a",{parentName:"p",href:"https://redis.io/clients#java"},"here"),". For your first steps with Java and Redis, this article will show how to use ",(0,s.kt)("a",{parentName:"p",href:"https://github.com/redis/jedis"},"Jedis"),", the supported Redis client for Java."),(0,s.kt)("p",null,"Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT."),(0,s.kt)("h3",{id:"run-a-redis-server"},"Run a Redis server"),(0,s.kt)("p",null,"You can either run Redis in a Docker container or directly on your machine.\nUse these commands to setup a Redis server locally on Mac OS:"),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre"}," brew tap redis-stack/redis-stack\n brew install --cask redis-stack\n")),(0,s.kt)("admonition",{title:"INFO",type:"info"},(0,s.kt)("p",{parentName:"admonition"},"Redis Stack unifies and simplifies the developer experience of the leading Redis modules and the capabilities they provide. Redis Stack provides the following in addition to Redis Open Source: JSON, Search, Time Series, and Probabilistic data structures.")),(0,s.kt)("p",null,"Ensure that you are able to use the following Redis command to connect to the Redis instance."),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-bash"}," redis-cli\n localhost>\n")),(0,s.kt)("h3",{id:"using-jedis"},"Using Jedis"),(0,s.kt)("h3",{id:"step-1-add-dependencies-jedis-dependency-to-your-maven-or-gradle-project-file"},"Step 1. Add dependencies Jedis dependency to your Maven (or Gradle) project file:"),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-xml"}," \n redis.clients\n jedis\n 3.4.0\n \n")),(0,s.kt)("h3",{id:"step-2-import-the-required-classes"},"Step 2. 
Import the required classes"),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-java"}," import redis.clients.jedis.*;\n")),(0,s.kt)("h3",{id:"step-3-create-a-connection-pool"},"Step 3. Create a Connection Pool"),(0,s.kt)("p",null,"Once you have added the Jedis library to your project and imported the necessary classes you can create a connection pool."),(0,s.kt)("p",null,"You can find more information about Jedis connection pool in the ",(0,s.kt)("a",{parentName:"p",href:"https://github.com/redis/jedis/wiki/Getting-started#basic-usage-example"},"Jedis Wiki"),". The connection pool is based on the ",(0,s.kt)("a",{parentName:"p",href:"http://commons.apache.org/proper/commons-pool/apidocs/org/apache/commons/pool2/impl/GenericObjectPoolConfig.html"},"Apache Common Pool 2.0 library"),"."),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-java"},' JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "localhost", 6379);\n')),(0,s.kt)("h3",{id:"step-4-write-your-application-code"},"Step 4. Write your application code"),(0,s.kt)("p",null,"Once you have access to the connection pool you can now get a Jedis instance and start to interact with your Redis instance."),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-java"},' // Create a Jedis connection pool\n JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "localhost", 6379);\n\n // Get the pool and use the database\n try (Jedis jedis = jedisPool.getResource()) {\n\n jedis.set("mykey", "Hello from Jedis");\n String value = jedis.get("mykey");\n System.out.println( value );\n\n jedis.zadd("vehicles", 0, "car");\n jedis.zadd("vehicles", 0, "bike");\n Set vehicles = jedis.zrange("vehicles", 0, -1);\n System.out.println( vehicles );\n\n }\n\n // close the connection pool\n jedisPool.close();\n')),(0,s.kt)("p",null,'Find more information about Java & Redis connections in the "',(0,s.kt)("a",{parentName:"p",href:"https://github.com/redis-developer/redis-connect/tree/master/java/jedis"},"Redis Connect"),'".'),(0,s.kt)("h3",{id:"redis-launchpad"},"Redis Launchpad"),(0,s.kt)("p",null,"Redis Launchpad is like an \u201cApp Store\u201d for Redis sample apps. 
You can easily find apps for your preferred frameworks and languages.\nCheck out a few of these apps below, or ",(0,s.kt)("a",{parentName:"p",href:"https://launchpad.redis.com"},"click here to access the complete list"),"."),(0,s.kt)("div",{class:"row text--center"},(0,s.kt)("div",{class:"col "},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"movie-database-app-in-java"},"Movie Database app in Java"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"launchpad",src:a(17970).Z,width:"2274",height:"1486"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"http://launchpad.redis.com/?id=project%3Ademo-movie-app-redisearch-java"},"Movie Database app in Java")," based on Search capabilities"))),(0,s.kt)("div",{class:"col"},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"leaderboard-app-in-java"},"Leaderboard app in Java"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"launchpad",src:a(6116).Z,width:"1262",height:"1010"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"http://launchpad.redis.com/?id=project%3Abasic-redis-leaderboard-demo-java"},"How to implement leaderboard app")," using Redis & Java(Spring)")))),(0,s.kt)("h3",{id:"ecosystem"},"Ecosystem"),(0,s.kt)("p",null,"As developer you can use the Java client library directly in your application, or you can frameworks like: ",(0,s.kt)("a",{parentName:"p",href:"https://spring.io/"},"Spring"),", ",(0,s.kt)("a",{parentName:"p",href:"https://quarkus.io/"},"Quarkus"),", ",(0,s.kt)("a",{parentName:"p",href:"https://vertx.io/"},"Vert.x"),", and ",(0,s.kt)("a",{parentName:"p",href:"https://micronaut.io/"},"Micronaut"),"."),(0,s.kt)("div",{class:"row text--center"},(0,s.kt)("div",{class:"col "},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"develop-with-spring"},"Develop with Spring"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"Spring logo",src:a(66725).Z,width:"800",height:"206"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"https://spring.io/projects/spring-data-redis"},"Spring Data Redis"),", part of the larger Spring Data project. It provides easy access to Redis from Spring applications."))),(0,s.kt)("div",{class:"col"},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"develop-with-quarkus"},"Develop with Quarkus"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"Quarkus logo",src:a(50187).Z,width:"1281",height:"196"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"https://quarkus.io/guides/redis"},"Redis Client extension")," allows you to connect your Quarkus application to a Redis instance.")))),(0,s.kt)("div",{class:"row text--center"},(0,s.kt)("div",{class:"col"},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"develop-with-vertx"},"Develop with Vert.x"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"Vert.x logo",src:a(26364).Z,width:"192",height:"84"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"https://vertx.io/introduction-to-vertx-and-reactive/"},"Eclipse Vert.x")," is a framework to build reactive applications on the JVM. ",(0,s.kt)("a",{parentName:"p",href:"https://vertx.io/docs/vertx-redis-client/java/"},"Vert.x-redis")," is redis client to be used with Vert.x."))),(0,s.kt)("div",{class:"col"},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"develop-with-micronaut"},"Develop with Micronaut"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"Micronaut logo",src:a(61110).Z,width:"1315",height:"298"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"https://micronaut.io/"},"Micronaut")," is a framework for building microservices and serverless applications. 
The ",(0,s.kt)("a",{parentName:"p",href:"https://micronaut-projects.github.io/micronaut-redis/snapshot/guide/"},"Micronaut Redis")," extension provides the integration with Redis.")))),(0,s.kt)("hr",null),(0,s.kt)("h3",{id:"more-developer-resources"},"More developer resources"),(0,s.kt)("p",null,(0,s.kt)("strong",{parentName:"p"},(0,s.kt)("a",{parentName:"strong",href:"https://github.com/redis-developer/brewdis"},"Brewdis - Product Catalog (Spring)")),"\nSee how to use Redis and Spring to build a product catalog with streams, hashes and Search"),(0,s.kt)("p",null,(0,s.kt)("strong",{parentName:"p"},(0,s.kt)("a",{parentName:"strong",href:"https://github.com/redis-developer/redis-streams-in-action"},"Redis Stream in Action (Spring)")),"\nSee how to use Spring to create multiple producer and consumers with Redis Streams"),(0,s.kt)("p",null,(0,s.kt)("strong",{parentName:"p"},(0,s.kt)("a",{parentName:"strong",href:"https://github.com/redis-developer/vertx-rate-limiting-redis"},"Rate Limiting with Vert.x")),"\nSee how to use Redis Sorted Set with Vert.x to build a rate limiting service."),(0,s.kt)("h3",{id:"redis-university"},"Redis University"),(0,s.kt)("h3",{id:"redis-for-java-developers"},(0,s.kt)("a",{parentName:"h3",href:"https://university.redis.com/courses/ru102j/"},"Redis for Java Developers")),(0,s.kt)("p",null,"Redis for Java Developers teaches you how to build robust Redis client applications in Java using the Jedis client library. The course focuses on writing idiomatic Java applications with the Jedis API, describing language-specific patterns for managing Redis database connections, handling errors, and using standard classes from the JDK. The course material uses the Jedis API directly with no additional frameworks. As such, the course is appropriate for all Java developers, and it clearly illustrates the principles involved in writing applications with Redis."),(0,s.kt)("div",{class:"text--center"},(0,s.kt)("iframe",{width:"560",height:"315",src:"https://www.youtube.com/embed/CmQMdJefTjc",frameborder:"0",allow:"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture",allowfullscreen:!0})),(0,s.kt)("div",null,(0,s.kt)("a",{href:"https://launchpad.redis.com",target:"_blank",rel:"noopener",className:"link"}," ",(0,s.kt)("img",{src:"/img/launchpad.png",className:"thumb",loading:"lazy",alt:"Redis Launchpad"}))))}p.isMDXComponent=!0},6116:(e,t,a)=>{a.d(t,{Z:()=>i});const i=a.p+"assets/images/basicleaderboardjava-71718a9cf1490e5d50bebbe7c0074b7d.png"},17970:(e,t,a)=>{a.d(t,{Z:()=>i});const i=a.p+"assets/images/moviedatabasejava-b17f667506e72ebe506f8e0191d5950e.png"},61110:(e,t,a)=>{a.d(t,{Z:()=>i});const i=a.p+"assets/images/micronaut-dc77eb63819d66d67d3af937bc6e7061.svg"},50187:(e,t,a)=>{a.d(t,{Z:()=>i});const i=a.p+"assets/images/quarkus-d70048326a3d8d1251647944ca959f8c.png"},66725:(e,t,a)=>{a.d(t,{Z:()=>i});const i=a.p+"assets/images/spring-840a3a0aafc4886b3a2bb441b6d4beda.png"},26364:(e,t,a)=>{a.d(t,{Z:()=>i});const 
i="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMAAAABUCAYAAADUKzhSAAAAAXNSR0IArs4c6QAAAIRlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEbAAUAAAABAAAAUgEoAAMAAAABAAIAAIdpAAQAAAABAAAAWgAAAAAAAABIAAAAAQAAAEgAAAABAAOgAQADAAAAAQABAACgAgAEAAAAAQAAAMCgAwAEAAAAAQAAAFQAAAAAmofVGQAAAAlwSFlzAAALEwAACxMBAJqcGAAAAVlpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KTMInWQAAIxtJREFUeAHtXQmYXFWVvm+p6u6kO4jQonwoiCAknUSZhnRWaHAcBQcFpBHIANlIZsjiN87nrkwpIzoOLpBlICuEbUz7KZ+sDo6UZO1gi0A6REBANpUgDtm6u+ot8//3vltdVf1edVV1dQjMu0l1vXffveeee945555z7lJCxCmmQEyBmAIxBWIKxBSIKRBTIKZATIGYAjEFYgrEFIgpEFMgpkBMgZgCMQViCsQUiCkQUyCmQEyBmAIxBWIKxBSIKRBTIKZATIGYAjEFYgrEFIgpEFMgpkBMgZgCMQViCsQUiCkQUyCmQEyBmAIxBWIKxBSIKXDIU8CoGsP2dlvs3Tu4fne3C5he1XBLVzREa6sdWqS7Oys6Oizx7LNm6PNaZBb0DW1VmzpQUePZfTxo1UmalUoG+maKzlJFRuJZJ9+jj//G/NaV4XQPafbopg/6qfSZTsijsrNS7Q/Zr+x9ajB/RUBY2b2A7QHXQyONBBOWTYwRJsFI4EHBTgDvMNhheSPcxQHwKZGq6l36EJoBKJVeVV234jYrrqC7Yo2f/Bnf948whJGlhsC783xfNODPA+LJrqdRjoSr1UhAPH3R0tJoiaaLfd+zDMOQ0g4ccClcd9yx6+xdL57pud5YwzB7UbyqF6f7V/htoGt+0nONe8Wubc+LE9rGmHXiJuS9A+WgeYCBID6SDsE3ISgcC/N9DzQ7gId/xPdO1/C7xI6ux1laJo6s6bTSnhzROjtdu+W0j/vC+JInjDcAM6Rf+Qyj8SA0fa3xYF5xWZ2ny8jnHJEamyz7q3se37xt9unfe6/pJS/wfXMfQEaPfHgnhu8l8EpeW7t50YaBfrONspN817OnLf2MaYgjADCLV8m88AS2w/OkXZ+4beUvFoA++h2EFy/OLXtYy1UMXpDv+X8v6ur/wXfA/3z/YBFhJ4SR6fsJrj4t2ttNvMjaCADNHpg4lt/Y4dclVwnXgTToNm3hZ/ofB6Os9lsmfU3UNZyRwymH9HAv0CPTFpaf2Q3OeF7UeaOQcaFhJdA4nkW/noiGVQXUFKYHiOPbfgP2u8nt6VopmZ+jAU06ZSa5YPwTRLL+DEPTOgJqzbKJmGWJvqx7DGGu23/Hn+Y2zLl4TMORk3uze4VpQgZYZlBSmZaZEHOnL2tcs8lY29GSSnb2pDKDioZk6LKzpy2bN7rusFWOlw1IG05gz3dEQ/Iwsaf3z3eB+ZdztEpBEYeAjsyqXACam2UvPeF9x8z0XQrI1Baq51kXmlmc7U9sOwYv8iXk12YUULY3GW2uIBN4HjS8sHHvCMdvAHmuwT2Hmz2GA1r7eO7hee2SKzy/Hu31SZBWwsOY8zoE8V24B0LBe6qsPc1CNjjqb3zTvMlsaVvimeIK0d3VDQVC/GUZQ5h9voNBwfPYfi37FYGxj3fq12UMV/UXwjhmysyz3uh79SmMrsf4rs/8qJGADFgHzFfNmbZi89rNV/2uvT1lp9MpNapFtJhCmVQ6lZk1ZenJ4KGbDmSgzIXoxydkxOMj4ZiG1fBG76s96zYtPl/mVPEnCng0qE7pGJli5yM9GHs2CtOi7UpiJNFpD5qywXR9CoaAXRtFJPm4rD80A8jbE9paYexMAxOQKRrwQb7ZgPsXXbHvAQULQ6UcjQRxquHHT0q4HthzIEEApWYiQ1bz0fgZ6IMDYcpCEFpM3/i1NW5ShxwJ+vtZBgnm3PDaqhA/A+UNjHim7O+5rfNH/WDr53qBxuWWIVGqA1Ia/+JvPPOdhF0HnD2YQQJdSTnglXA1zt7hGZhfCohpGRsSFupK0xKCFNoO8RMNqAeSeZfhWtBpTolURdqf9fJfKO/LSb5mbNMwVgcvRmszE9oXMIwrJCAO49VpxwE8gmiJ6Yk5EDbma03iove8v1309OzjBUgZRQCOUqxX5cdAPXQx8DvYUkQiHdiGbo/f+pOfz2vShs+Y+EITEATYu3itlrXBmtB2jti6lSMdEoZ1mlrR+Os2+K3bYfnilI+fLqvL8zu4hm3B/gZ/7m5amZnfelNi7eaFD2W9zL/VJ0aTmUuYNYaddfsyKDdx9oylSwloQYkokn42F2VRZwLrogppEpF8p84eDZJ4/7Lm4SWP0nSqNupUjQAI2KcklHDN3p9Cc/0JjEFkyXymgBcKxhxnj538tyyDoXw4o4ApbeHW1sMA6SLAJkTC49tJ0BfwTPMWZpZMJgxa04KpUe3HTApUhVIMNHJoa3BLoKZVG7o9fusP21f5lg2HAg6TNKalEGjBJXw4OBACD0HPsVOPZUu4HSXbN836iD7oNhR8BbcYyWL8grKSJsXXdQX93dtqBGFGY92mRV/vy+7fkrDqk2iAQhyRjCTKuUmzYdHcGSvOQ/0shai4MPP4DMx/no2yrAM2GlQur142aY9K9DsH7l+7adH3U6mUWa6PkQcjd1lCynJlwi44CtBR2y9aJt0Bs+dzYEZyJxCHtjQwMJnOXNz/Iqxy2Xk0obq7PavfutC37CPRBgWPODtkBN9zHhBPbN2F6FASo0CxRoKQSHtoLwIF/ygM9w3hQVCH1uJh6IEjswnXsbfJhxn01SoY0TkaWYbndRle9qsYmBvRjmbqYngw0/wxiF6NB35nox8T5cDl0+6Wwk3HOgMGHAUX53voxIWe5d5jZftfRr3+khERhGnQvT60/z7U+88AHi5pmlqW73kPWL77XQQwDgO+UpsUIyfvGVlx3ITr+1vlvVJ4fkfHBquz8yIXPbs0K/p2wR+oB0z46NGWhCsHNW/9/B
k3nbRy44I/ahiEy+uVnRdlZ7Uvf7fv+OtZFqqN9CkgrsRB/XFh9ycyzv7XnYxxGbN6esZFlc2rFn1ZrQAIcTwmcLq7oYGtm03X+RyaoNSS2DY1M9KnxEnTjoYB+AquOdJEMQTLhift/Powf5QJUFAORuZq+Rb73wci9BQ8kzckjS8ybs8xPypjsmlw/agc8FJRd/DaMPgJ7yV/5yP/E1UtJP9LCCdfBGFYhfpjIAjsDkc4jm6M737ah+8jnuji7OKzIfXDs06Y0WzWZVbkPQSTGgxQPO3s6HooL7+cS0VFlCTz09xYs2XRH2DaXJm06m7N+AfwXsNCsxK05fluNmk1NGXcA3cip31DZwfCwGBypOBamI5/R9JGGaeXYZ8I7S9DusJCNM5x3MvWb1/0Fzl6QIBkS1X+qV4AEJ9GmyY08BN+S9svDdM6CyYK8wgzCw3WYPrOpaDOddJngCavCMcgBi7GTzoVGm2qNK0IGzF/NGvj/g9wfu+WMA97tRRsQ0x8+QjxuHhVjhTNzaXKlkaRETDV75BylH04y0zHtdeL46Q9LW8H/dEz6NCs7o5tG8TEUx+DPt4OJoUQSD+GCgNOsZUwPedyINwtpkxpEHV1pV/2Sy9Z4pln+oXdf3jAY4VN+4hkMQ2FH8sQx0Dz81YnmhuM6qxLL75tzvTlH62zGy/vd/Zh9DVU33XBge9Exu2FP9B4Bsr/q7HJ+IY2hYxuIztn2tKvw+4/E6YP+xbB/ATmZ1Eu2Z/Z94ObNy++T5tOA81Ud1W9ALC9wESB57sag9ZZeSgoZ9gQlyPvOhCSnctpkrxy0Ze7d0stYVL7w3QOTCzAhaK14A277m3S7JGmGJcTdEfD8k3FOD09/CanVpuG8JmCyaTTmrPRglLQtCEZe+vW3xnjJs3zTWuDQIxRlqBZAesC2H5M3ucc4oL6xTeogJSAE0s9W5wQb5JZ5eNXDEHet6eFl8bV6CZ3/v49+0+3rORx8N8R6ZHKL6xOAgzuYX4gNff0ZQ+tfHjBwywEu3+aaSS+2ZvdT5unFC9mERlK9mf3/3bN5sW0NsTK7vkwMxbwclhpiBc6BGztDI+x7gJDvgINhk4EGkw5wxPslklKMCpzhk0ZBvzwhzHTanQo51fOQJI5pHngecYtErvuc13BtTWlEqZwSz1+E5/5QaTHdHdu70Q/H8s5xhj2FCnFB8T4qR9QOJa5/shPhPeX9jVToFwUzMr/phBupAZeev+SflOYlzAcCdA03aKUC9uVz+B/bOho2ZDkKIIZY4RJpQNByyEcZ9iacAoSWbcfLol5MbFlXRSPaotFyk7DEwB2ihpYaafbMWSjYSPoDL4NKDEDGrzSFMwfWNnkhTCljoRZAGkHMIYNMRqg5/fJ5RZyjiCltFqlbRxK5XPzJVhGIrspNTXeO/prwtzznWMluu1qVDwUUGfkhv7A6s1XbfPc7NX1CfjsMFNK4GbByaUpdFTTO19de7zTvAYmzdHIy+J9ltL+LvwDvH7jnzipxjaHmlQrgcOgR8MVACGdYYD1fO+WwPnNc4YhC75xnmg57d1SoytneBASgzK08yuMOUXOr0HuN01/tawzTE02qN03OQO68JkCFKhYIQWWYXLN0SGX4A+A4X1j7ebF1/RnD2yEs0s/oJQQyNCoZdgz4cxe3pfZR+VVwu4XmTqEPOEcd67duHCVjCCVuayiXGINXwCkU4ihWc4MiwfVKCBj20ojWPZoRGsukQjlNF0J9PTMb8tppyEMMiXn/NIegKeN++fcpHuvhJBOc7SpJEUNs5XAGMGyUYOZnP0ewXarBo3QaKfkIcyRz+x3+3oRGk1AbKM6woYs13UQLskyMlWK/1wISbLf6X05M+rAbFZs6dwJhVDbVAqB8lsKhmY1M1xQDRqbtKhgZjjn/EL7q5nfgMlhUpkS3VulU03TiyZYOclk2FIm/V1OrYNeBjbu+wsapRGELmJk+GtB/iF0o0Oj6x5e+CL8gXmYIMPrli89Gkua/dL0jyzCoDdeNtjHMGbe+t+f30+fg75HZI0qH5SyvcoHGWhid1/9z8zRvS9CU78XRpGLd0eNzUmYD4kJU85AyPRX8GCswBwKg6+c3wnTDxdeNs/5lQ4PnV/XM6z1smLOTAoDU5Rny5lqqJCWhBhOGLRWq1uL0BNyU0w3FKL4qIqC8uWDeqSf72HazXheVkm3gwHS8vJQ+sPQaEouZrvqjjnTl30codHLhgiNDoG+n21INCb7nAPXrNu48Fe1CnmGNVobAeDLkuHIdJ+aGTa/KFzG8Dipo2aGMVk2Fxm/CkMilxeEVS3P4czvEWB42JNy9ZUD59f2XZg+O7b8HnOIgDvkLioN1hePbnxN3gyeLdZl3rzvYxDff6mz1xrbdg7mcU9TJp+MqMjZZfhAT8k9CBLD2mvAmnUcoVHCGt3kXbl/7/7ptpl8PxzcUqHRqKYR8mygr7AFSx2uBmthGQbXYo1MqpUAKGd4YGb4i0BXmyictCL252Pd+1HQ/n/GNTWcJBgf5JLW6oavnV+YADIp51cYayQkmlzp4En0F+rQ4vEbzZZJ6yCivUPYnOGQ2LLPEKy51xP1V4ue9D6EfOmZhpdnLs04tZw5ugwnmpqafNCjV4xtfQ/We64tcvgDnyer/B2pYOR8SjTMN/HJQGh0Qf/s6csuQVBkmwqNypdQglh5SMN3wCoahDz7erE8VvqN7e3fsNLpEpOKedWruaydANAZVrO3uzAz/HP4qx8D45NfubaFM8ON2PzxGXD9DXoCrQBhvROqZcokcNxk1CX3Aj86v4h9es7vsevrXtGD5TjlO7+AYXBh1xUFbVV6wyVFDqJ1/W98G1X3ATcIQAnSKfyI/9Cppe1MaIPb4N8cJZdFC2musa6a73AhGEyBmSSvD9E/OjSKBXNdc2cs+wpCntf2ZfcyhF2CWHmd4XYyLIXud/rmrt/02RcY8uzEHoG8EjW/LA+xcpvVDqwQnBlWM5iqLvQoWN/3ZuH2hmBmOBSqaXhwfoGWWviGUUQ6vyZMqvVydrUyTag0jzSlQpsrJ1MxI/Q6drwNHrUKIASLuFpbx4gD9liMOKFCYJv+GGwdHY+Hn0Ag5CMSBPcE5OLhXAyXqBNu9kaxa/tTcjRJd46YGVDQhWHeqJWZvrFmo/Ft+ANn2Fbdxxw3Q0WIUbRkQsizkbO9N6/bvOhO5VOMLPMTm9oKgHaG+/9yt1l3xB/gDB8bOMM0g+gMnyLGTZohdm7fGIwW0qIBHjBp0o4ocH4lwchw1IRZz/JvJcIQHl1H3pb5h/3UzJg/HEflsQyfMQ4DHDAESDNoqNaCsF5fYrxpm5uk0MsqGpyqj3GekxkIlqAJ0oVm1sC7yEABgPmd5739o+S0fwUj3lAIHpTnHS3fSHT2iAzW312NnlERagLk0z6HCzIdy0zC6d33+0Rfcr58EPgUuUIjdEFbvJZJOcNckOUbmBkG+FxMWM0MI1Q6RzaYP4kVHHViMfJj2e+UJpP2Ezjza4h7xePbn5NCE+Y7DN0DEp595YfX+hOVp8uTNQPNBT+g3CUVP
qLiMhKIJc4M62D+H98DHzI9RzhPLptFWdkGhADLnU0rifwD2OdwtngeQYW8rZHA+62QDDUKoDe+890AYaVMQrBnp/GxsWqUixsuphlV7e6uEPBDZpEBapukrQou9eTMMDuunWEyENu6QJwyo1lqfMWQ1Op6eNfOr8ZL6kZwyGqJZL7QyIyy/hAHzk5W+8nIV4Tt/mBOwiojWdihBhnSa29UDQoVE77lSMFRicJFoqjZU5o9nvOCZxinyn0ONPf0CREo9FZICFlKqwKO8DX1iaYzHC/DvkWaPyBGti4xGsOs+wXMKP+adn+1u7uqoY9mtGrqRtQJnGHYrtDc98uVnOolg4+xpseyx5iZzEWyMsOeaubXF+Mmt2HSow1CQibjRAnXElFonnZ3bL9Plq+MGcB+kufegD6e4nnWiQjMtmAeYaz8CHMcbLJxBdf59yyn7lnuRJhgk8W7R8md2iKRjPIFlHm2c+uj0PrfQV9JXwp3HvPLnqg/cgcZ+sjdYSzjuTd69e5YsWPbkyqsfOhGffJ6kbvU8frZU5eelTCTXwuWOpQys7G7S4Y8H1y7cdF/FO7u8g0ufcgBH6GLUshV32TOGcYyaV+cA0CaAaAVafKK2chbLjV/U5PspMnQZ77zq5c9e+4tKKt3oClNWRlmnmg0d4qtW4L9tZVVLio90I+iBwW3EGqvs/PLRkvbOMOyPwlzh5GMZEEZ9sn3D+DzMvTDPdhYtEpqfRZSEbFq+lrUxMG7JfOmUguyM9tuGAO9c4e0fJVvE6Vkg91dvf+LLc8ziane3YWQqplCZmcnR0cGFsKDCbXo3cgIgHaG69x7zD7jOWjy99PbwyuXzrBvWK1i/JSpYsfWLXKIH+z8KtPJdfs917xddpTLnkut+S9Fjb9kRuNxr2IsdaxLqeLhz1qAU4qPhjaDdu6Upo3f3PBpY3cvljhb49B9MjQ1PRMmuejceNd5O7alZA7/KMb33mpmD1HXzFtvm+uTiVFHYc9umNCzaC7x/CDPca5Yt2nJbjl6YHcXtX4KO89mnX79iaZvXbl2o/EFJVypqFE3B6+ai5ERgHyN3dLGGPfX82aGOcOJHanuHPRoC5E2XZhEduJwRntwSybRM7/3iF1dz6uIUUqZF6xQadImi1rKUCUh5cGcUdqsEKOeca5ob5YnvHktrWfjRIsemGON0PbsA4UD68Rld76MlbKY23jkEcFdWmk4vW/BJOP1nRdl5k5bvgjM/6l+R+7uKh7x8nvGEyOSfZn9S2H3/0ybTihgcMEbKW169vVj6o84e8605Y+lUgtvl23UeCUoESrvhbJkpUlqbIZszPUQc75tMja1Z+AM+xeKE05pJliYwvPAHLzU+MjBE0cVDMf5JbziNLT2Lq5Rzb2cqUZYV27W734BnfokeklI7B8FkDfY2G8msYDs53K5+EDEB4/eOokRG0Z95s344UQcY7gUzE/kS9juODMIu7uw1OEJHJ+4hIWP7v6j1AZg8gTMH2/OjGWfr7Mbzt7T9xpDQzfPbV9+AttolxthWKN2STNc7SDmIGHIooPbs/UZRADvC5xhOoR0hrPQ+IdZSfvvRMuUE7AV8FRoRDIG8VFrYDx3l9OzVR14VZnzm8PgTbsITs+TWzYhBE4PNqL73kI4u2R8PQLBHMQxCKZ1OISA/VRzIQNK4E1Dv9yG4cTgQKsz5aFXrm//yEYEF0m943Ag6LuJM4MYJffk7i454QWmzwnS9BWtoMd3YUIRggthscEtZR2wFd5k6dwRFAA0nHOGocmV7tXtQcPjDAXTnAVb4PNBtIZaQDGImk+6RaLOUOBbOXEBHmx7r6drBUy8FRACmp16eh9GsMsjUD4Eh/nHed0kHQ75lDvQavryG2HSnOy4/exXKbPahWbHS/YXrtu4ZKcKeaYcaH2TgkRTyDO8/zLVygkKUlIesJUcdcrs6ct/SILoNnldi6QZshawBsPQznBz/X1cyyPDmvJUBxAJy4Tg238Ela4EY7BuYCJh9afr9IEQt0uAwbyCvH6r/gnoACFY6LtOGgxPVamFIIn+ZhAtusAcP/lbyPcQAi3FRIcEFci8nLSaM23ZRQhlzs9k9+MlGiXtfu7ugmb/6ZpNi1cU7O5qlyO/cBoyN9VZo09wvHxBCg7Ysuo+ixMkPsk2KSi1IsLICoB2hmnC+OLWwplh2QVqunxtJ/f8QjJ+JnY88qKMipS/7LkcmuS3VU75WpXxg/kOkKHhXAj/C7T/AVxKPr4p9DT9vmKNb7tMrpWi/3CIJs2889pvOAZvb500abiJPzoFu7sO/Kk/681isZYWtbuLzMxzQaHhL8GJb7PhQ4QKkjw0yzBunfeRVUfJRXc1miMohXR0dyp5EqzdkWt5cgvcAoNIwcl3TDE6kpI4c7SWicuXZerQAlftd/VYcbUszTksp8baiLNh/8Pkw1ivRkSFD0ZFzN6tFxPbJueOfKm+xcE1ZWhhcDZyDDqYZEZ+AmczoFlxed/ALjDpx3iOeWfSqh+F1R480CqKl+Aq0MiFZW/6M2/vWrJHMn0Kdj/mDtQosuIkoLaSgoSyYXDUAVt2wxivv/9OYhRsj4zAsRjn6PuwxqJLV/dEOcNPPPIspjTuyXOGNTTdCe387sTpZQ/Kh7Vyfke7wSSY3ERDgav2ox1YjXtl3zwfSUaGunbi9MELgxGR/Sc+fBfUfggL49QL7BGQI4FaC1RZOxWUDmZbfZ60QGbkJzh1QR6FWAwq+Kkkf86M5d+E3T89ow6yLWWSZOuTo3FgeObbqx9e/EsyP9sg3Ffufo+KFhnelaOShzVCkPogKlE8yc3xPBzrTPz2wNcYLWLUqBi/Su8Pjq2Zc4b9NVDw5wHJsE6qDSCGe7PsRGXLnsP6LUcTPBhj9dkPYzMOTl6W7eaPOGH1wvKwetlv8ixrkdzWmbQtkdEmfFjxEnl0itE3t3v7Xeb4SV/Fsudv0QdADZo82DuhI0PiAUjbKZgboJlEeg1P+AAAoZeC8CQ1PbQ54YtZ05adA0n8EK8hgr/FEST3c78vywQCAYs0Za9M0+6//kyc7MBDcolTKSbE7q56hjy7sLvrK4Sdf6BV7trf/w1Mm51vm4njh9hFZvMQLdu0r5l9+vJfYh/ylnyBIvxK08ERAO0MY00PfgTiaTjDJ+J9qj3DCmMwpXR+ez3bukNmDWfmt5AKNo65b+Vr5f/qEtAzcaCzk2mWAeucSVUdNKnZsa3T29F5LegxHk7xJXlCoCNDE42WST/2e7ZfELSiR4pKG4UBAgGC5oEdcr+s3Jz2NWNj0VobAN8CRj0JB8/Kx1yZibX8v0Ovr1iXXtSlyv6rm04bzlXtyxv7HLU0HfV4XEUUD2FNH3d39WZMU8jdXdJ36MSq4Fwy/BSEKpX+4l7gcanaRSbhAWzo2+LwwFHSNj3/zo4p3z955dYFvcOZKQ7TxDn0anih1vJILQYbFxSBNs7XaJz5ZXN3ice2vgxVA6Km8p8PDxXa29ydVvXHyyCKRZzl0D08ZHTtTr5kgcjQpYD9G8wHcATQ8IPIUOJ8c3zbtSw2jMiQ
Glk990mcQ/oTttm+O2VQqyOqcir8rW2ItZ+EcGOWM7j88Jp5YPBtLMOyqfa0fEHvEuP6AEIuCkQHKJQRibu7eBSpfyVMn+cYNeKIUlw4Bdh8xl1kmCj/EkwcFtF0KC5OW9HGKJGBw/y+JiuxRhZIyxFyUNlyMg6WAOQ2soAVbwu0HYdOxeRSQ2HO2PdUh8rBvLIyfHnVf7ABUr5rnOVXWbMlS3tK0EEE04JTjJ9c4pk6hZEhHgP/Zatl0uXVR4bUPgwwotT+LS0dSW3SYPXtKuzYEnA+ydRsO/dhHhmYZdgLxOld6bzKiS/xE2x6Z3aEkvK5u8vOZntvW7tpyXqOIHqPACsVJz5LYS5gzaZF/w5z6edcIYoykUKAZzgv6EAWP5Jxydxpy66gEBG3Yrjl3B88ASCxODOMX1mEFOc7w4ggYEmw7+5w9NHigclUTgeKyjDiwMS//PAF6W9e59/n5+vyOk9/qzoSKrIKtziyDJJcw5Hfrsou5y/te/o6j2951fMNrJqVIGlSaE2JGBEtRfwIyNggMlSWU4xoSgBMokHzJ1hJ2NyMc8qQZp9+/VTY3B+Gtkdb2Dc9ONXzGcvMmbpsCh77maakHAXgwP+WZhITMCYB5EfdiqxtyoNsnx81xp3HMu1pSXdeRqaeDnXOP7yhmRnnwOs4FEsrgxx83Q6/AciEU0wBvXHW5B8cR8c6cOgj2wh7cDAFIDczDG5fKZEx5MQJfikFdPWDzd9kCNXBMHxL5/FseZpX1GRca8+IwsA3r/Pv9XOdx3udp791HYQrATdnJMuwqvrdMBxjINvjlz7bvpKNOzoytHNbFyYGZ0laYC9hgIsaeQAb/3/BZSN5TnEILVIqz09ypAINiJpfB1gEp46GEcfJMqZnncyz9vGc/BQxsmEfKMoAzFhW2t//V1kOP+H0WkCqhIWfHdD/QB6DjOti2wcwvoSH58pRowxzluYRy/LcfxgEM4ExyW1LmLkWdEs4LNKw5e4knElaj3nFH3MECUysiL7Ibg/6QwocvBSENZ2ecx4UE+7H+h/GwXEAeiZrindYj0lEBnaHlY+X+uE+/LaW/8/4NcVr8T6x3RAvtVBjlw8vtKQHprSflI/qnNe9bKIdv1BJQQMDoQ8uftDOCZis0hFMnVdkuj3bbkG06lHo/1HgeI4+5E+8UEyVm/zVGQwHKlEDhqSUqtP/jhe9ujcmqbrgEz9re31GDyt8cO973LSqGZgYZfGLKrtnj2zf8Pb+xvGcKeDygjMb8SM5+KEXB4ezm6/fvHXxLuWcqpCnarL0X2pxMnJq08IH5k69fgJ2isLMNxy4cKFIYmQ0sOnGNYXV8HTbOxtFl9gDgqGR0OKhjZdfMrR6zTOJT8TLHbKt4dQdEnhegaHaGep5HqhBl1TZiokHPcplDAc+d1lJTYmfI51g2dbjiL2zPcLkpzAhUIEBDidyZCes2fLZHSqKM9iRLayk7oYTmZFCUMaoEdJuxbQZ3OkQqCOSRX+ApgJXTvJbac1qmT9AMWXC4CQTjUzKxzEff7Ym+9AOZgq0cLUY5MPVptQAjcisQwmIarkYzgDuBpiTH2/O9KX3YN/uJ/Dj170YLhmy0fxAu6gPxxM29GX33YMY/rlFzM+ZY6u5eVzufe3evdPgvf4Oi/hUQhK2p2Gxnr7W3/mwmAfHnqNTDp/856WuD64JlI9J5E8N5Req9BrMV4bDVSnU0PKh+KdDi1aUGQq3IgiqcDQcMolk9KRnzwGD/7oh0fRefLOeNrEs5DVAMF6o86y5fMC1O9iiqJOcOdY3I/E9XAEqFyct8eWWj8u9TSigNfr89uuOdJz65VCeF9hWvdywgBAobf6fIiJzFZ1SXfZt0vWCbsQCUECO/183+Yw9d+qyY7Fg8WRSwHSNXfwlSF7nl+F9nGIKvK0oQGeVTF7cKebxWXH+2+0+HgHebm+0yv4w8iLahWJ4+FG4L8/ZrrK9uFpMgZgCMQViCsQUiCkQUyCmQEyBmAIxBWIKxBSIKRBTIKZATIGYAjEFYgrEFIgpEFMgpkBMgZgCMQViCsQUiCkQUyCmQEyBmAIxBWIKxBSIKRBTIKZATIGYAjEFYgoMiwL/B0EimzaLfyxmAAAAAElFTkSuQmCC"}}]); \ No newline at end of file +"use strict";(self.webpackChunkredis_developer_hub=self.webpackChunkredis_developer_hub||[]).push([[2801],{85162:(e,t,a)=>{a.d(t,{Z:()=>r});var i=a(67294),s=a(86010);const n="tabItem_Ymn6";function r(e){let{children:t,hidden:a,className:r}=e;return i.createElement("div",{role:"tabpanel",className:(0,s.Z)(n,r),hidden:a},t)}},65488:(e,t,a)=>{a.d(t,{Z:()=>h});var i=a(87462),s=a(67294),n=a(86010),r=a(72389),o=a(67392),l=a(7094),d=a(12466);const c="tabList__CuJ",p="tabItem_LNqP";function u(e){var t;const{lazy:a,block:r,defaultValue:u,values:h,groupId:m,className:v}=e,g=s.Children.map(e.children,(e=>{if((0,s.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),A=h??g.map((e=>{let{props:{value:t,label:a,attributes:i}}=e;return{value:t,label:a,attributes:i}})),k=(0,o.l)(A,((e,t)=>e.value===t.value));if(k.length>0)throw new Error(`Docusaurus error: Duplicate values "${k.map((e=>e.value)).join(", ")}" found in . 
Every value needs to be unique.`);const f=null===u?u:u??(null==(t=g.find((e=>e.props.default)))?void 0:t.props.value)??g[0].props.value;if(null!==f&&!A.some((e=>e.value===f)))throw new Error(`Docusaurus error: The has a defaultValue "${f}" but none of its children has the corresponding value. Available values are: ${A.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:b,setTabGroupChoices:y}=(0,l.U)(),[w,E]=(0,s.useState)(f),N=[],{blockElementScrollPositionUntilNextRender:Z}=(0,d.o5)();if(null!=m){const e=b[m];null!=e&&e!==w&&A.some((t=>t.value===e))&&E(e)}const x=e=>{const t=e.currentTarget,a=N.indexOf(t),i=A[a].value;i!==w&&(Z(t),E(i),null!=m&&y(m,String(i)))},J=e=>{var t;let a=null;switch(e.key){case"Enter":x(e);break;case"ArrowRight":{const t=N.indexOf(e.currentTarget)+1;a=N[t]??N[0];break}case"ArrowLeft":{const t=N.indexOf(e.currentTarget)-1;a=N[t]??N[N.length-1];break}}null==(t=a)||t.focus()};return s.createElement("div",{className:(0,n.Z)("tabs-container",c)},s.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,n.Z)("tabs",{"tabs--block":r},v)},A.map((e=>{let{value:t,label:a,attributes:r}=e;return s.createElement("li",(0,i.Z)({role:"tab",tabIndex:w===t?0:-1,"aria-selected":w===t,key:t,ref:e=>N.push(e),onKeyDown:J,onClick:x},r,{className:(0,n.Z)("tabs__item",p,null==r?void 0:r.className,{"tabs__item--active":w===t})}),a??t)}))),a?(0,s.cloneElement)(g.filter((e=>e.props.value===w))[0],{className:"margin-top--md"}):s.createElement("div",{className:"margin-top--md"},g.map(((e,t)=>(0,s.cloneElement)(e,{key:t,hidden:e.props.value!==w})))))}function h(e){const t=(0,r.Z)();return s.createElement(u,(0,i.Z)({key:String(t)},e))}},71131:(e,t,a)=>{a.d(t,{Z:()=>m});var i=a(67294),s=a(3905),n=a(52195);const r="riContainer_bco2",o="riDescriptionShort_E27B",l="riDetail_wzFs",d="riIcon_yDou",c="riTitle_x6vI",p="riDescription_RDnu",u="riMore_apRP";var h=a(86010);const m=e=>{const[t,a]=i.useState(!1);return i.createElement("a",{href:e.page,className:r},i.createElement("div",{className:o},i.createElement("div",{className:d},i.createElement("span",{className:"fe fe-zap"})),i.createElement("div",{className:l},i.createElement("div",{className:c},i.createElement("a",{href:e.page},e.title)),i.createElement("div",{className:p},e.description,i.Children.count(e.children)>0&&i.createElement("span",{className:(0,h.Z)(u,"fe","fe-more-horizontal"),onClick:()=>a(!t)})))),t&&i.createElement("div",{className:"ri-description-long"},i.createElement(s.Zo,{components:n.Z},e.children)))}},14020:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>l,contentTitle:()=>r,default:()=>p,frontMatter:()=>n,metadata:()=>o,toc:()=>d});var i=a(87462),s=(a(67294),a(3905));a(65488),a(85162),a(44996),a(71131);const n={id:"index-gettingstarted",title:"Java and Redis",sidebar_label:"Overview",slug:"/develop/java/getting-started"},r=void 0,o={unversionedId:"develop/java/getting-started/index-gettingstarted",id:"develop/java/getting-started/index-gettingstarted",title:"Java and Redis",description:"Find tutorials, examples and technical articles that will help you to develop with Redis and 
Java.",source:"@site/docs/develop/java/getting-started/index.md",sourceDirName:"develop/java/getting-started",slug:"/develop/java/getting-started",permalink:"/develop/java/getting-started",draft:!1,editUrl:"https://github.com/redis-developer/redis-developer/edit/master/docs/develop/java/getting-started/index.md",tags:[],version:"current",lastUpdatedAt:1698417267,formattedLastUpdatedAt:"Oct 27, 2023",frontMatter:{id:"index-gettingstarted",title:"Java and Redis",sidebar_label:"Overview",slug:"/develop/java/getting-started"}},l={},d=[{value:"Getting Started",id:"getting-started",level:2},{value:"Run a Redis server",id:"run-a-redis-server",level:3},{value:"Using Jedis",id:"using-jedis",level:3},{value:"Step 1. Add dependencies Jedis dependency to your Maven (or Gradle) project file:",id:"step-1-add-dependencies-jedis-dependency-to-your-maven-or-gradle-project-file",level:3},{value:"Step 2. Import the required classes",id:"step-2-import-the-required-classes",level:3},{value:"Step 3. Create a Connection Pool",id:"step-3-create-a-connection-pool",level:3},{value:"Step 4. Write your application code",id:"step-4-write-your-application-code",level:3},{value:"Redis Launchpad",id:"redis-launchpad",level:3},{value:"Movie Database app in Java",id:"movie-database-app-in-java",level:4},{value:"Leaderboard app in Java",id:"leaderboard-app-in-java",level:4},{value:"Ecosystem",id:"ecosystem",level:3},{value:"Develop with Spring",id:"develop-with-spring",level:4},{value:"Develop with Quarkus",id:"develop-with-quarkus",level:4},{value:"Develop with Vert.x",id:"develop-with-vertx",level:4},{value:"Develop with Micronaut",id:"develop-with-micronaut",level:4},{value:"More developer resources",id:"more-developer-resources",level:3},{value:"Redis University",id:"redis-university",level:3},{value:"Redis for Java Developers",id:"redis-for-java-developers",level:3}],c={toc:d};function p(e){let{components:t,...n}=e;return(0,s.kt)("wrapper",(0,i.Z)({},c,n,{components:t,mdxType:"MDXLayout"}),(0,s.kt)("p",null,"Find tutorials, examples and technical articles that will help you to develop with Redis and Java."),(0,s.kt)("h2",{id:"getting-started"},"Getting Started"),(0,s.kt)("p",null,"Java community has built many client libraries that you can find ",(0,s.kt)("a",{parentName:"p",href:"https://redis.io/clients#java"},"here"),". For your first steps with Java and Redis, this article will show how to use ",(0,s.kt)("a",{parentName:"p",href:"https://github.com/redis/jedis"},"Jedis"),", the supported Redis client for Java."),(0,s.kt)("p",null,"Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT."),(0,s.kt)("h3",{id:"run-a-redis-server"},"Run a Redis server"),(0,s.kt)("p",null,"You can either run Redis in a Docker container or directly on your machine.\nUse these commands to setup a Redis server locally on Mac OS:"),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre"}," brew tap redis-stack/redis-stack\n brew install --cask redis-stack\n")),(0,s.kt)("admonition",{title:"INFO",type:"info"},(0,s.kt)("p",{parentName:"admonition"},"Redis Stack unifies and simplifies the developer experience of the leading Redis modules and the capabilities they provide. 
Redis Stack provides the following in addition to Redis Open Source: JSON, Search, Time Series, and Probabilistic data structures.")),(0,s.kt)("p",null,"Ensure that you are able to use the following Redis command to connect to the Redis instance."),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-bash"}," redis-cli\n localhost>\n")),(0,s.kt)("h3",{id:"using-jedis"},"Using Jedis"),(0,s.kt)("h3",{id:"step-1-add-dependencies-jedis-dependency-to-your-maven-or-gradle-project-file"},"Step 1. Add dependencies Jedis dependency to your Maven (or Gradle) project file:"),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-xml"}," \n redis.clients\n jedis\n 5.0.2\n \n")),(0,s.kt)("h3",{id:"step-2-import-the-required-classes"},"Step 2. Import the required classes"),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-java"}," import redis.clients.jedis.*;\n")),(0,s.kt)("h3",{id:"step-3-create-a-connection-pool"},"Step 3. Create a Connection Pool"),(0,s.kt)("p",null,"Once you have added the Jedis library to your project and imported the necessary classes you can create a connection pool."),(0,s.kt)("p",null,"You can find more information about Jedis connection pool in the ",(0,s.kt)("a",{parentName:"p",href:"https://github.com/redis/jedis/wiki/Getting-started#basic-usage-example"},"Jedis Wiki"),". The connection pool is based on the ",(0,s.kt)("a",{parentName:"p",href:"http://commons.apache.org/proper/commons-pool/apidocs/org/apache/commons/pool2/impl/GenericObjectPoolConfig.html"},"Apache Common Pool 2.0 library"),"."),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-java"},' JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "localhost", 6379);\n')),(0,s.kt)("h3",{id:"step-4-write-your-application-code"},"Step 4. Write your application code"),(0,s.kt)("p",null,"Once you have access to the connection pool you can now get a Jedis instance and start to interact with your Redis instance."),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-java"},' // Create a Jedis connection pool\n JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "localhost", 6379);\n\n // Get the pool and use the database\n try (Jedis jedis = jedisPool.getResource()) {\n\n jedis.set("mykey", "Hello from Jedis");\n String value = jedis.get("mykey");\n System.out.println( value );\n\n jedis.zadd("vehicles", 0, "car");\n jedis.zadd("vehicles", 0, "bike");\n Set vehicles = jedis.zrange("vehicles", 0, -1);\n System.out.println( vehicles );\n\n }\n\n // close the connection pool\n jedisPool.close();\n')),(0,s.kt)("p",null,'Find more information about Java & Redis connections in the "',(0,s.kt)("a",{parentName:"p",href:"https://github.com/redis-developer/redis-connect/tree/master/java/jedis"},"Redis Connect"),'".'),(0,s.kt)("h3",{id:"redis-launchpad"},"Redis Launchpad"),(0,s.kt)("p",null,"Redis Launchpad is like an \u201cApp Store\u201d for Redis sample apps. 
You can easily find apps for your preferred frameworks and languages.\nCheck out a few of these apps below, or ",(0,s.kt)("a",{parentName:"p",href:"https://launchpad.redis.com"},"click here to access the complete list"),"."),(0,s.kt)("div",{class:"row text--center"},(0,s.kt)("div",{class:"col "},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"movie-database-app-in-java"},"Movie Database app in Java"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"launchpad",src:a(17970).Z,width:"2274",height:"1486"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"http://launchpad.redis.com/?id=project%3Ademo-movie-app-redisearch-java"},"Movie Database app in Java")," based on Search capabilities"))),(0,s.kt)("div",{class:"col"},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"leaderboard-app-in-java"},"Leaderboard app in Java"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"launchpad",src:a(6116).Z,width:"1262",height:"1010"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"http://launchpad.redis.com/?id=project%3Abasic-redis-leaderboard-demo-java"},"How to implement leaderboard app")," using Redis & Java(Spring)")))),(0,s.kt)("h3",{id:"ecosystem"},"Ecosystem"),(0,s.kt)("p",null,"As developer you can use the Java client library directly in your application, or you can frameworks like: ",(0,s.kt)("a",{parentName:"p",href:"https://spring.io/"},"Spring"),", ",(0,s.kt)("a",{parentName:"p",href:"https://quarkus.io/"},"Quarkus"),", ",(0,s.kt)("a",{parentName:"p",href:"https://vertx.io/"},"Vert.x"),", and ",(0,s.kt)("a",{parentName:"p",href:"https://micronaut.io/"},"Micronaut"),"."),(0,s.kt)("div",{class:"row text--center"},(0,s.kt)("div",{class:"col "},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"develop-with-spring"},"Develop with Spring"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"Spring logo",src:a(66725).Z,width:"800",height:"206"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"https://spring.io/projects/spring-data-redis"},"Spring Data Redis"),", part of the larger Spring Data project. It provides easy access to Redis from Spring applications."))),(0,s.kt)("div",{class:"col"},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"develop-with-quarkus"},"Develop with Quarkus"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"Quarkus logo",src:a(50187).Z,width:"1281",height:"196"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"https://quarkus.io/guides/redis"},"Redis Client extension")," allows you to connect your Quarkus application to a Redis instance.")))),(0,s.kt)("div",{class:"row text--center"},(0,s.kt)("div",{class:"col"},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"develop-with-vertx"},"Develop with Vert.x"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"Vert.x logo",src:a(26364).Z,width:"192",height:"84"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"https://vertx.io/introduction-to-vertx-and-reactive/"},"Eclipse Vert.x")," is a framework to build reactive applications on the JVM. ",(0,s.kt)("a",{parentName:"p",href:"https://vertx.io/docs/vertx-redis-client/java/"},"Vert.x-redis")," is redis client to be used with Vert.x."))),(0,s.kt)("div",{class:"col"},(0,s.kt)("div",{className:"ri-container"},(0,s.kt)("h4",{id:"develop-with-micronaut"},"Develop with Micronaut"),(0,s.kt)("p",null,(0,s.kt)("img",{alt:"Micronaut logo",src:a(61110).Z,width:"1315",height:"298"})),(0,s.kt)("p",null,(0,s.kt)("a",{parentName:"p",href:"https://micronaut.io/"},"Micronaut")," is a framework for building microservices and serverless applications. 
The ",(0,s.kt)("a",{parentName:"p",href:"https://micronaut-projects.github.io/micronaut-redis/snapshot/guide/"},"Micronaut Redis")," extension provides the integration with Redis.")))),(0,s.kt)("hr",null),(0,s.kt)("h3",{id:"more-developer-resources"},"More developer resources"),(0,s.kt)("p",null,(0,s.kt)("strong",{parentName:"p"},(0,s.kt)("a",{parentName:"strong",href:"https://github.com/redis-developer/brewdis"},"Brewdis - Product Catalog (Spring)")),"\nSee how to use Redis and Spring to build a product catalog with streams, hashes and Search"),(0,s.kt)("p",null,(0,s.kt)("strong",{parentName:"p"},(0,s.kt)("a",{parentName:"strong",href:"https://github.com/redis-developer/redis-streams-in-action"},"Redis Stream in Action (Spring)")),"\nSee how to use Spring to create multiple producer and consumers with Redis Streams"),(0,s.kt)("p",null,(0,s.kt)("strong",{parentName:"p"},(0,s.kt)("a",{parentName:"strong",href:"https://github.com/redis-developer/vertx-rate-limiting-redis"},"Rate Limiting with Vert.x")),"\nSee how to use Redis Sorted Set with Vert.x to build a rate limiting service."),(0,s.kt)("h3",{id:"redis-university"},"Redis University"),(0,s.kt)("h3",{id:"redis-for-java-developers"},(0,s.kt)("a",{parentName:"h3",href:"https://university.redis.com/courses/ru102j/"},"Redis for Java Developers")),(0,s.kt)("p",null,"Redis for Java Developers teaches you how to build robust Redis client applications in Java using the Jedis client library. The course focuses on writing idiomatic Java applications with the Jedis API, describing language-specific patterns for managing Redis database connections, handling errors, and using standard classes from the JDK. The course material uses the Jedis API directly with no additional frameworks. As such, the course is appropriate for all Java developers, and it clearly illustrates the principles involved in writing applications with Redis."),(0,s.kt)("div",{class:"text--center"},(0,s.kt)("iframe",{width:"560",height:"315",src:"https://www.youtube.com/embed/CmQMdJefTjc",frameborder:"0",allow:"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture",allowfullscreen:!0})),(0,s.kt)("div",null,(0,s.kt)("a",{href:"https://launchpad.redis.com",target:"_blank",rel:"noopener",className:"link"}," ",(0,s.kt)("img",{src:"/img/launchpad.png",className:"thumb",loading:"lazy",alt:"Redis Launchpad"}))))}p.isMDXComponent=!0},6116:(e,t,a)=>{a.d(t,{Z:()=>i});const i=a.p+"assets/images/basicleaderboardjava-71718a9cf1490e5d50bebbe7c0074b7d.png"},17970:(e,t,a)=>{a.d(t,{Z:()=>i});const i=a.p+"assets/images/moviedatabasejava-b17f667506e72ebe506f8e0191d5950e.png"},61110:(e,t,a)=>{a.d(t,{Z:()=>i});const i=a.p+"assets/images/micronaut-dc77eb63819d66d67d3af937bc6e7061.svg"},50187:(e,t,a)=>{a.d(t,{Z:()=>i});const i=a.p+"assets/images/quarkus-d70048326a3d8d1251647944ca959f8c.png"},66725:(e,t,a)=>{a.d(t,{Z:()=>i});const i=a.p+"assets/images/spring-840a3a0aafc4886b3a2bb441b6d4beda.png"},26364:(e,t,a)=>{a.d(t,{Z:()=>i});const 
i="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMAAAABUCAYAAADUKzhSAAAAAXNSR0IArs4c6QAAAIRlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEbAAUAAAABAAAAUgEoAAMAAAABAAIAAIdpAAQAAAABAAAAWgAAAAAAAABIAAAAAQAAAEgAAAABAAOgAQADAAAAAQABAACgAgAEAAAAAQAAAMCgAwAEAAAAAQAAAFQAAAAAmofVGQAAAAlwSFlzAAALEwAACxMBAJqcGAAAAVlpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KTMInWQAAIxtJREFUeAHtXQmYXFWVvm+p6u6kO4jQonwoiCAknUSZhnRWaHAcBQcFpBHIANlIZsjiN87nrkwpIzoOLpBlICuEbUz7KZ+sDo6UZO1gi0A6REBANpUgDtm6u+ot8//3vltdVf1edVV1dQjMu0l1vXffveeee945555z7lJCxCmmQEyBmAIxBWIKxBSIKRBTIKZATIGYAjEFYgrEFIgpEFMgpkBMgZgCMQViCsQUiCkQUyCmQEyBmAIxBWIKxBSIKRBTIKZATIGYAjEFYgrEFIgpEFMgpkBMgZgCMQViCsQUiCkQUyCmQEyBmAIxBWIKxBSIKXDIU8CoGsP2dlvs3Tu4fne3C5he1XBLVzREa6sdWqS7Oys6Oizx7LNm6PNaZBb0DW1VmzpQUePZfTxo1UmalUoG+maKzlJFRuJZJ9+jj//G/NaV4XQPafbopg/6qfSZTsijsrNS7Q/Zr+x9ajB/RUBY2b2A7QHXQyONBBOWTYwRJsFI4EHBTgDvMNhheSPcxQHwKZGq6l36EJoBKJVeVV234jYrrqC7Yo2f/Bnf948whJGlhsC783xfNODPA+LJrqdRjoSr1UhAPH3R0tJoiaaLfd+zDMOQ0g4ccClcd9yx6+xdL57pud5YwzB7UbyqF6f7V/htoGt+0nONe8Wubc+LE9rGmHXiJuS9A+WgeYCBID6SDsE3ISgcC/N9DzQ7gId/xPdO1/C7xI6ux1laJo6s6bTSnhzROjtdu+W0j/vC+JInjDcAM6Rf+Qyj8SA0fa3xYF5xWZ2ny8jnHJEamyz7q3se37xt9unfe6/pJS/wfXMfQEaPfHgnhu8l8EpeW7t50YaBfrONspN817OnLf2MaYgjADCLV8m88AS2w/OkXZ+4beUvFoA++h2EFy/OLXtYy1UMXpDv+X8v6ur/wXfA/3z/YBFhJ4SR6fsJrj4t2ttNvMjaCADNHpg4lt/Y4dclVwnXgTToNm3hZ/ofB6Os9lsmfU3UNZyRwymH9HAv0CPTFpaf2Q3OeF7UeaOQcaFhJdA4nkW/noiGVQXUFKYHiOPbfgP2u8nt6VopmZ+jAU06ZSa5YPwTRLL+DEPTOgJqzbKJmGWJvqx7DGGu23/Hn+Y2zLl4TMORk3uze4VpQgZYZlBSmZaZEHOnL2tcs8lY29GSSnb2pDKDioZk6LKzpy2bN7rusFWOlw1IG05gz3dEQ/Iwsaf3z3eB+ZdztEpBEYeAjsyqXACam2UvPeF9x8z0XQrI1Baq51kXmlmc7U9sOwYv8iXk12YUULY3GW2uIBN4HjS8sHHvCMdvAHmuwT2Hmz2GA1r7eO7hee2SKzy/Hu31SZBWwsOY8zoE8V24B0LBe6qsPc1CNjjqb3zTvMlsaVvimeIK0d3VDQVC/GUZQ5h9voNBwfPYfi37FYGxj3fq12UMV/UXwjhmysyz3uh79SmMrsf4rs/8qJGADFgHzFfNmbZi89rNV/2uvT1lp9MpNapFtJhCmVQ6lZk1ZenJ4KGbDmSgzIXoxydkxOMj4ZiG1fBG76s96zYtPl/mVPEnCng0qE7pGJli5yM9GHs2CtOi7UpiJNFpD5qywXR9CoaAXRtFJPm4rD80A8jbE9paYexMAxOQKRrwQb7ZgPsXXbHvAQULQ6UcjQRxquHHT0q4HthzIEEApWYiQ1bz0fgZ6IMDYcpCEFpM3/i1NW5ShxwJ+vtZBgnm3PDaqhA/A+UNjHim7O+5rfNH/WDr53qBxuWWIVGqA1Ia/+JvPPOdhF0HnD2YQQJdSTnglXA1zt7hGZhfCohpGRsSFupK0xKCFNoO8RMNqAeSeZfhWtBpTolURdqf9fJfKO/LSb5mbNMwVgcvRmszE9oXMIwrJCAO49VpxwE8gmiJ6Yk5EDbma03iove8v1309OzjBUgZRQCOUqxX5cdAPXQx8DvYUkQiHdiGbo/f+pOfz2vShs+Y+EITEATYu3itlrXBmtB2jti6lSMdEoZ1mlrR+Os2+K3bYfnilI+fLqvL8zu4hm3B/gZ/7m5amZnfelNi7eaFD2W9zL/VJ0aTmUuYNYaddfsyKDdx9oylSwloQYkokn42F2VRZwLrogppEpF8p84eDZJ4/7Lm4SWP0nSqNupUjQAI2KcklHDN3p9Cc/0JjEFkyXymgBcKxhxnj538tyyDoXw4o4ApbeHW1sMA6SLAJkTC49tJ0BfwTPMWZpZMJgxa04KpUe3HTApUhVIMNHJoa3BLoKZVG7o9fusP21f5lg2HAg6TNKalEGjBJXw4OBACD0HPsVOPZUu4HSXbN836iD7oNhR8BbcYyWL8grKSJsXXdQX93dtqBGFGY92mRV/vy+7fkrDqk2iAQhyRjCTKuUmzYdHcGSvOQ/0shai4MPP4DMx/no2yrAM2GlQur142aY9K9DsH7l+7adH3U6mUWa6PkQcjd1lCynJlwi44CtBR2y9aJt0Bs+dzYEZyJxCHtjQwMJnOXNz/Iqxy2Xk0obq7PavfutC37CPRBgWPODtkBN9zHhBPbN2F6FASo0CxRoKQSHtoLwIF/ygM9w3hQVCH1uJh6IEjswnXsbfJhxn01SoY0TkaWYbndRle9qsYmBvRjmbqYngw0/wxiF6NB35nox8T5cDl0+6Wwk3HOgMGHAUX53voxIWe5d5jZftfRr3+khERhGnQvT60/z7U+88AHi5pmlqW73kPWL77XQQwDgO+UpsUIyfvGVlx3ITr+1vlvVJ4fkfHBquz8yIXPbs0K/p2wR+oB0z46NGWhCsHNW/9/B
k3nbRy44I/ahiEy+uVnRdlZ7Uvf7fv+OtZFqqN9CkgrsRB/XFh9ycyzv7XnYxxGbN6esZFlc2rFn1ZrQAIcTwmcLq7oYGtm03X+RyaoNSS2DY1M9KnxEnTjoYB+AquOdJEMQTLhift/Powf5QJUFAORuZq+Rb73wci9BQ8kzckjS8ybs8xPypjsmlw/agc8FJRd/DaMPgJ7yV/5yP/E1UtJP9LCCdfBGFYhfpjIAjsDkc4jm6M737ah+8jnuji7OKzIfXDs06Y0WzWZVbkPQSTGgxQPO3s6HooL7+cS0VFlCTz09xYs2XRH2DaXJm06m7N+AfwXsNCsxK05fluNmk1NGXcA3cip31DZwfCwGBypOBamI5/R9JGGaeXYZ8I7S9DusJCNM5x3MvWb1/0Fzl6QIBkS1X+qV4AEJ9GmyY08BN+S9svDdM6CyYK8wgzCw3WYPrOpaDOddJngCavCMcgBi7GTzoVGm2qNK0IGzF/NGvj/g9wfu+WMA97tRRsQ0x8+QjxuHhVjhTNzaXKlkaRETDV75BylH04y0zHtdeL46Q9LW8H/dEz6NCs7o5tG8TEUx+DPt4OJoUQSD+GCgNOsZUwPedyINwtpkxpEHV1pV/2Sy9Z4pln+oXdf3jAY4VN+4hkMQ2FH8sQx0Dz81YnmhuM6qxLL75tzvTlH62zGy/vd/Zh9DVU33XBge9Exu2FP9B4Bsr/q7HJ+IY2hYxuIztn2tKvw+4/E6YP+xbB/ATmZ1Eu2Z/Z94ObNy++T5tOA81Ud1W9ALC9wESB57sag9ZZeSgoZ9gQlyPvOhCSnctpkrxy0Ze7d0stYVL7w3QOTCzAhaK14A277m3S7JGmGJcTdEfD8k3FOD09/CanVpuG8JmCyaTTmrPRglLQtCEZe+vW3xnjJs3zTWuDQIxRlqBZAesC2H5M3ucc4oL6xTeogJSAE0s9W5wQb5JZ5eNXDEHet6eFl8bV6CZ3/v49+0+3rORx8N8R6ZHKL6xOAgzuYX4gNff0ZQ+tfHjBwywEu3+aaSS+2ZvdT5unFC9mERlK9mf3/3bN5sW0NsTK7vkwMxbwclhpiBc6BGztDI+x7gJDvgINhk4EGkw5wxPslklKMCpzhk0ZBvzwhzHTanQo51fOQJI5pHngecYtErvuc13BtTWlEqZwSz1+E5/5QaTHdHdu70Q/H8s5xhj2FCnFB8T4qR9QOJa5/shPhPeX9jVToFwUzMr/phBupAZeev+SflOYlzAcCdA03aKUC9uVz+B/bOho2ZDkKIIZY4RJpQNByyEcZ9iacAoSWbcfLol5MbFlXRSPaotFyk7DEwB2ihpYaafbMWSjYSPoDL4NKDEDGrzSFMwfWNnkhTCljoRZAGkHMIYNMRqg5/fJ5RZyjiCltFqlbRxK5XPzJVhGIrspNTXeO/prwtzznWMluu1qVDwUUGfkhv7A6s1XbfPc7NX1CfjsMFNK4GbByaUpdFTTO19de7zTvAYmzdHIy+J9ltL+LvwDvH7jnzipxjaHmlQrgcOgR8MVACGdYYD1fO+WwPnNc4YhC75xnmg57d1SoytneBASgzK08yuMOUXOr0HuN01/tawzTE02qN03OQO68JkCFKhYIQWWYXLN0SGX4A+A4X1j7ebF1/RnD2yEs0s/oJQQyNCoZdgz4cxe3pfZR+VVwu4XmTqEPOEcd67duHCVjCCVuayiXGINXwCkU4ihWc4MiwfVKCBj20ojWPZoRGsukQjlNF0J9PTMb8tppyEMMiXn/NIegKeN++fcpHuvhJBOc7SpJEUNs5XAGMGyUYOZnP0ewXarBo3QaKfkIcyRz+x3+3oRGk1AbKM6woYs13UQLskyMlWK/1wISbLf6X05M+rAbFZs6dwJhVDbVAqB8lsKhmY1M1xQDRqbtKhgZjjn/EL7q5nfgMlhUpkS3VulU03TiyZYOclk2FIm/V1OrYNeBjbu+wsapRGELmJk+GtB/iF0o0Oj6x5e+CL8gXmYIMPrli89Gkua/dL0jyzCoDdeNtjHMGbe+t+f30+fg75HZI0qH5SyvcoHGWhid1/9z8zRvS9CU78XRpGLd0eNzUmYD4kJU85AyPRX8GCswBwKg6+c3wnTDxdeNs/5lQ4PnV/XM6z1smLOTAoDU5Rny5lqqJCWhBhOGLRWq1uL0BNyU0w3FKL4qIqC8uWDeqSf72HazXheVkm3gwHS8vJQ+sPQaEouZrvqjjnTl30codHLhgiNDoG+n21INCb7nAPXrNu48Fe1CnmGNVobAeDLkuHIdJ+aGTa/KFzG8Dipo2aGMVk2Fxm/CkMilxeEVS3P4czvEWB42JNy9ZUD59f2XZg+O7b8HnOIgDvkLioN1hePbnxN3gyeLdZl3rzvYxDff6mz1xrbdg7mcU9TJp+MqMjZZfhAT8k9CBLD2mvAmnUcoVHCGt3kXbl/7/7ptpl8PxzcUqHRqKYR8mygr7AFSx2uBmthGQbXYo1MqpUAKGd4YGb4i0BXmyictCL252Pd+1HQ/n/GNTWcJBgf5JLW6oavnV+YADIp51cYayQkmlzp4En0F+rQ4vEbzZZJ6yCivUPYnOGQ2LLPEKy51xP1V4ue9D6EfOmZhpdnLs04tZw5ugwnmpqafNCjV4xtfQ/We64tcvgDnyer/B2pYOR8SjTMN/HJQGh0Qf/s6csuQVBkmwqNypdQglh5SMN3wCoahDz7erE8VvqN7e3fsNLpEpOKedWruaydANAZVrO3uzAz/HP4qx8D45NfubaFM8ON2PzxGXD9DXoCrQBhvROqZcokcNxk1CX3Aj86v4h9es7vsevrXtGD5TjlO7+AYXBh1xUFbVV6wyVFDqJ1/W98G1X3ATcIQAnSKfyI/9Cppe1MaIPb4N8cJZdFC2musa6a73AhGEyBmSSvD9E/OjSKBXNdc2cs+wpCntf2ZfcyhF2CWHmd4XYyLIXud/rmrt/02RcY8uzEHoG8EjW/LA+xcpvVDqwQnBlWM5iqLvQoWN/3ZuH2hmBmOBSqaXhwfoGWWviGUUQ6vyZMqvVydrUyTag0jzSlQpsrJ1MxI/Q6drwNHrUKIASLuFpbx4gD9liMOKFCYJv+GGwdHY+Hn0Ag5CMSBPcE5OLhXAyXqBNu9kaxa/tTcjRJd46YGVDQhWHeqJWZvrFmo/Ft+ANn2Fbdxxw3Q0WIUbRkQsizkbO9N6/bvOhO5VOMLPMTm9oKgHaG+/9yt1l3xB/gDB8bOMM0g+gMnyLGTZohdm7fGIwW0qIBHjBp0o4ocH4lwchw1IRZz/JvJcIQHl1H3pb5h/3UzJg/HEflsQyfMQ4DHDAESDNoqNaCsF5fYrxpm5uk0MsqGpyqj3GekxkIlqAJ0oVm1sC7yEABgPmd5739o+S0fwUj3lAIHpTnHS3fSHT2iAzW312NnlERagLk0z6HCzIdy0zC6d33+0Rfcr58EPgUuUIjdEFbvJZJOcNckOUbmBkG+FxMWM0MI1Q6RzaYP4kVHHViMfJj2e+UJpP2Ezjza4h7xePbn5NCE+Y7DN0DEp595YfX+hOVp8uTNQPNBT+g3CUVP

Redis Discord Server

The Redis Discord server is a place where you can learn, share, and collaborate about anything and everything Redis.

  • Connect with users from the community, Redis University, and Redis Inc.
  • Get your questions answered and learn cool new tips and tricks.
  • Watch for notifications of new content from Redis and the community.
  • Share your knowledge and expertise with others.

How to join

If you already have a Discord account, joining is easy. Just go to https://discord.gg/redis and you're in.

If you don't have a Discord account, you'll need to create one. Once you have an account, you can download Discord for your desktop, get the mobile app, or just use it in a browser.

Server rules

Once you are on the server, you should be aware of our rules:

  • Be respectful.
  • Sending and linking harmful material will result in an immediate and permanent ban.
  • No shady links (e.g. to .jar files, or .exe files).
  • DO NOT spam. Also, DO NOT spam.
  • DO NOT use vulgar or explicit language—keep it family friendly.
  • DO NOT claim authorship of assets that are not yours. Plagiarism will result in a ban.
  • DO share your projects and ideas, but DO NOT advertise or solicit in the chat.
  • DO NOT ping @here or @everyone and DO NOT spam your messages to multiple channels.
  • DO NOT DM the admins or send us friend requests unless we invite you to do so.
  • Abide by the Redis Community Guidelines & Code of Conduct.

How to get help

Got a question you want answered? Got a problem you can’t figure out? Don’t worry. Happens to the best of us. We have a help system inspired by the one used by the most excellent Python Discord.

tl;dr

Ask a question in a channel in ❓ Help: Available. That channel moves to ✋ Help: In Use. Converse, discuss, get an answer. Close the channel with /close when you’re done and it moves to 💤 Help: Dormant.

Full Version

So, we built a help system to make it easier to ask and answer questions. There is a series of help channels on the server that all start with help- followed by a letter in the NATO Phonetic Alphabet (you know, the whole alfa bravo charlie thing). There are 26 channels, grouped into Help: Available, Help: In Use, and Help: Dormant.

❓ Help: Available contains channels that are available for a question. If you have a question, you can claim one of these channels simply by posting it there. The channel will immediately be moved to ✋ Help: In Use.

✋ Help: In Use has channels that are being used to answer a question. If you have the answer to a fellow user’s question, post it here. If you asked the question, other users may answer it. If someone has answered your question—or perhaps you answer it yourself—you can close the channel by typing /close. Channels will automatically close if they are inactive for 48 hours. Closed channels are moved to 💤 Help: Dormant.

💤 Help: Dormant is for channels that are not currently in use. You can read them—useful as there are lots of answered questions here—but you cannot post to them. When someone claims a channel in Help: Available, a random channel is selected from Help: Dormant to replace it.


Get Involved with Redis Community

Need help with Redis? Do you want to share something cool? Want to learn more about Redis? Check out some of the great community resources at your disposal:

Join the Redis Discord to get help and share your knowledge
Watch live Redis content on Redis Live

How to Build and Deploy Your Own Analytics Dashboard using NodeJS and Redis on the AWS Platform


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

An interactive analytics dashboard serves several purposes. It lets you share data and puts the vital information in front of you so you can make game-changing decisions at a faster pace. Building a real-time dynamic dashboard using a traditional relational database might require a complex set of queries. By using a NoSQL database like Redis, you can build a powerful interactive and dynamic dashboard with a small number of Redis commands.

Let’s take a look at how this was achieved.

  • What will you build?
  • What will you need?
  • Getting started
  • How does it work?
  • How data is stored
  • Navigating the application

What will you build?

You’ll build an analytics dashboard app that uses Redis bitmaps, written in NodeJS (JavaScript), and then deploy it to AWS.

Ready to get started? Ok, let’s dive straight in.

What will you need?

  • NodeJS: used as an open-source, cross-platform, backend JavaScript runtime environment that executes JavaScript code outside a web browser.
  • Redis Cloud: used as a real-time database, cache, and message broker.
  • NPM: used as a package manager. It allows you to build Node apps.

Getting Started

Prepare the environment

  • Install Node - v12.19.0
  • Install NPM - v6.14.8

Step 1. Sign up for a Free Redis Enterprise Cloud Account

Follow this tutorial to sign up for a free Redis Enterprise Cloud account.

image

Choose AWS as a Cloud vendor while creating your new subscription. At the end of the database creation process, you will get a Redis Enterprise Cloud database endpoint and password. Save them for later use.

image

Step 2. Clone the repository

 git clone https://github.com/redis-developer/basic-analytics-dashboard-redis-bitmaps-nodejs

Step 3. Set up a backend environment

First, set up the environment variables.

Go to the /server folder (cd ./server) and then run the command below:

 cp .env.example .env

Open the .env file and add your Redis Enterprise Cloud database endpoint URL, port, and password as shown below:


PORT=3000

# Host and port, with or without the `redis://` scheme.
# A host and port encoded in the Redis URI take precedence over the
# individual environment variables below, so this form is preferable.
REDIS_ENDPOINT_URI=redis://redis-XXXX.c212.ap-south-1-1.ec2.cloud.redislabs.com:15564

# Or you can set it here (ie. for docker development)
REDIS_HOST=redis-XXXX.c212.ap-south-1-1.ec2.cloud.redislabs.com
REDIS_PORT=XXXX

# You can set password here
REDIS_PASSWORD=reXXX

COMPOSE_PROJECT_NAME=redis-analytics-bitmaps
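
To see how the backend might consume these variables, here is a minimal sketch that builds a Redis client with node-redis v4 and dotenv; the actual project wiring may differ:

 // Hypothetical config/redis.js (a sketch, assuming node-redis v4 and dotenv).
 require('dotenv').config();
 const { createClient } = require('redis');

 // The full URI takes precedence; otherwise fall back to host/port variables.
 const url =
   process.env.REDIS_ENDPOINT_URI ||
   `redis://${process.env.REDIS_HOST}:${process.env.REDIS_PORT}`;

 const client = createClient({ url, password: process.env.REDIS_PASSWORD });
 client.on('error', (err) => console.error('Redis client error', err));

 // Call client.connect() once at startup before issuing commands.
 module.exports = client;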

Step 4. Install dependencies

 npm install

Step 5. Run the backend

 npm run dev

Step 6. Set up the frontend environment

Go to the client folder (cd ./client) and then:

 cp .env.example .env

Set the VUE_APP_API_URL parameter to the URL and port where your backend is running, as shown below:

VUE_APP_API_URL=http://localhost:3000

Step 7. Install dependencies

 npm install

Step 8. Run the frontend

 npm run serve

analytics

How does it work?

How the data is stored:

The event data is stored in various keys and data types, as discussed below:

For each of the following time spans:
  • year: like 2021
  • month: like 2021-03 (means March of 2021)
  • day: like 2021-03-03 (means 3rd March of 2021)
  • weekOfMonth: like 2021-03/4 (means 4th week of March 2021)
  • anytime
For each of the following scopes:
  • source
  • action
  • source + action
  • action + page
  • userId + action
  • global
For each of the following data types:
  • count (Integer stored as String)
  • bitmap
  • set

It generates keys with the following naming convention:

 rab:{type}[:custom:{customName}][:user:{userId}][:source:{source}][:action:{action}][:page:{page}]:timeSpan:{timeSpan}

where values in [] are optional.

For each generated key like rab:count:*, data is stored like: INCR {key}

Example:
 INCR rab:count:action:addToCart:timeSpan:2015-12/3

For each generated key like rab:set:*, data is stored like:

 SADD {key} {userId}
Example:
 SADD rab:set:action:addToCart:timeSpan:2015-12/3 8
For each generated key like rab:bitmap:*, data is stored like:

     SETBIT {key} {userId} 1
Example:
 SETBIT rab:bitmap:action:addToCart:timeSpan:2015-12/3 8 1
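
Taken together, one tracked event touches all three data types. Here is a minimal sketch in NodeJS, assuming node-redis v4 and the key convention above; the project's real code may structure this differently:

 // Record one event under the "action" scope for a given time span.
 async function trackEvent(client, { action, userId, timeSpan }) {
   const base = `action:${action}:timeSpan:${timeSpan}`;
   await Promise.all([
     client.incr(`rab:count:${base}`),               // total number of events
     client.sAdd(`rab:set:${base}`, String(userId)), // distinct users, as a set
     client.setBit(`rab:bitmap:${base}`, userId, 1), // distinct users, one bit per user id
   ]);
 }

 // Example, matching the commands above:
 // await trackEvent(client, { action: 'addToCart', userId: 8, timeSpan: '2015-12/3' });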

Cohort data

  • We store users who registered and then bought some products (the order of actions matters).
  • For each buy action in December, we first check whether the user has performed the register action (the register counter must be greater than zero).
  • If so, we set the user's bit to 1 (see the sketch after this list):
Example:
  SETBIT rab:bitmap:custom:cohort-buy:timeSpan:{timeSpan} {userId} 1
  • Example - User Id 2 bought 2 products on 2015-12-17. It won't be stored.
  • Example - User Id 10 bought 1 product on 2015-12-17 and registered on 2015-12-16, so it will be stored like:
 SETBIT rab:bitmap:custom:cohort-buy:timeSpan:2015-12 10 1
  • We assume that a user cannot buy without registering first.
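
In NodeJS, that check might look like the sketch below (node-redis v4; the per-user register counter key follows the naming convention described earlier, though the project's exact keys may differ):

 // On a "buy" event, mark the user in the cohort bitmap only if they
 // registered at some point (their register counter is greater than zero).
 async function trackCohortBuy(client, userId, timeSpan) {
   const registered = await client.get(
     `rab:count:user:${userId}:action:register:timeSpan:anytime`
   );
   if (Number(registered) > 0) {
     await client.setBit(`rab:bitmap:custom:cohort-buy:timeSpan:${timeSpan}`, userId, 1);
   }
 }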

Retention data

  • Retention means users who bought on two different dates.

  • For each buy action, we check whether the user bought more products anytime than they bought on the particular day (the current purchase not included).

  • If so, we add the user id to a set (see the sketch after this list):

     SADD rab:set:custom:retention-buy:timeSpan:{timeSpan} {userId}
  • Example - User Id 5 bought 3 products on 2015-12-15. This retention won't be stored (products bought on the particular day: 2, products bought anytime: 0).

  • Example - User Id 3 bought 1 product on 2015-12-15 and, before that, 1 product on 2015-12-13. This retention will be stored (products bought on the particular day: 0, products bought anytime: 1) like:

 SADD rab:set:custom:retention-buy:timeSpan:2015-12 3
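
A matching sketch of the retention check (node-redis v4; the per-user buy counters are assumed from the key convention above):

 // On a "buy" event, record retention when the user's all-time buy count
 // exceeds their buy count for this particular day (both counts taken
 // before the current purchase), i.e. they also bought on some other date.
 async function trackRetentionBuy(client, userId, day, month) {
   const [boughtToday, boughtAnytime] = await Promise.all([
     client.get(`rab:count:user:${userId}:action:buy:timeSpan:${day}`),
     client.get(`rab:count:user:${userId}:action:buy:timeSpan:anytime`),
   ]);
   if (Number(boughtAnytime) > Number(boughtToday)) {
     await client.sAdd(`rab:set:custom:retention-buy:timeSpan:${month}`, String(userId));
   }
 }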

How the data is accessed:

Total Traffic:

December:
  BITCOUNT rab:bitmap:custom:global:timeSpan:2015-12
X week of December:
  BITCOUNT rab:bitmap:custom:global:timeSpan:2015-12/{X}
Example:
 BITCOUNT rab:bitmap:custom:global:timeSpan:2015-12/3

Traffic per Page ({page} is one of: homepage, product1, product2, product3):

December:
 BITCOUNT rab:bitmap:action:visit:page:{page}:timeSpan:2015-12
Example:
 BITCOUNT rab:bitmap:action:visit:page:homepage:timeSpan:2015-12
X week of December:
 BITCOUNT rab:bitmap:action:visit:page:{page}:timeSpan:2015-12/{X}
Example:
 BITCOUNT rab:bitmap:action:visit:page:product1:timeSpan:2015-12/2

Traffic per Source ({source} is one of: Google, Facebook, email, direct, referral, none):

December:
 BITCOUNT rab:bitmap:source:{source}:timeSpan:2015-12
Example:
 BITCOUNT rab:bitmap:source:referral:timeSpan:2015-12
X week of December:
 BITCOUNT rab:bitmap:source:{source}:timeSpan:2015-12/{X}
Example:
 BITCOUNT rab:bitmap:source:google:timeSpan:2015-12/1
Trend traffic ({page} is one of: homepage, product1, product2, product3):
December:

From

 BITCOUNT rab:bitmap:action:visit:page:{page}:timeSpan:2015-12-01

to

 BITCOUNT rab:bitmap:action:visit:page:{page}:timeSpan:2015-12-31
  • 1st Week of December: Similar as above, but from 2015-12-01 to 2015-12-07
  • 2nd Week of December: Similar as above, but from 2015-12-08 to 2015-12-14
  • 3rd Week of December: Similar as above, but from 2015-12-15 to 2015-12-21
  • 4th Week of December: Similar as above, but from 2015-12-22 to 2015-12-28
  • 5th Week of December: Similar as above, but from 2015-12-29 to 2015-12-31
Example:
 BITCOUNT rab:bitmap:action:visit:page:homepage:timeSpan:2015-12-29 => BITCOUNT rab:bitmap:action:visit:page:homepage:timeSpan:2015-12-30 => BITCOUNT rab:bitmap:action:visit:page:homepage:timeSpan:2015-12-31
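
A sketch of how such a daily trend could be assembled, one BITCOUNT per day (node-redis v4; the helper and date formatting are illustrative):

 // Build the daily visit trend for one page across a month.
 async function visitTrend(client, page, year, month, daysInMonth) {
   const trend = [];
   for (let day = 1; day <= daysInMonth; day++) {
     const date = `${year}-${month}-${String(day).padStart(2, '0')}`;
     const key = `rab:bitmap:action:visit:page:${page}:timeSpan:${date}`;
     trend.push({ date, visitors: await client.bitCount(key) });
   }
   return trend;
 }

 // Example: await visitTrend(client, 'homepage', '2015', '12', 31);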

Total products bought:

December:
 GET rab:count:action:buy:timeSpan:2015-12
X week of December:
 GET rab:count:action:buy:timeSpan:2015-12/{X}
Example:
 GET rab:count:action:buy:timeSpan:2015-12/1

Total products added to cart:

December:
 GET rab:count:action:addToCart:timeSpan:2015-12
X week of December:
 GET rab:count:action:addToCart:timeSpan:2015-12/{X}
Example:
 GET rab:count:action:addToCart:timeSpan:2015-12/1
Shares of products bought ({productPage} is one of: product1, product2, product3):

December:

 GET rab:count:action:buy:page:{productPage}:timeSpan:2015-12
Example:
 GET rab:count:action:buy:page:product3:timeSpan:2015-12
X week of December:
 GET rab:count:action:buy:page:{productPage}:timeSpan:2015-12/{X}
Example:
 GET rab:count:action:buy:page:product1:timeSpan:2015-12/2

Customer and Cohort Analysis

  • People who registered: BITCOUNT rab:bitmap:action:register:timeSpan:2015-12

  • People who registered and then bought (order matters): BITCOUNT rab:bitmap:custom:cohort-buy:timeSpan:2015-12

  • Drop-off: (people who registered and then bought / people who registered) * 100 [%] (see the sketch at the end of this section)

  • Customers who bought a given product ({productPage} is one of: product1, product2, product3):

     SMEMBERS rab:set:action:buy:page:{productPage}:timeSpan:2015-12
Example:
 SMEMBERS rab:set:action:buy:page:product2:timeSpan:2015-12

Customers who bought Product1 and Product2:

 SINTER rab:set:action:buy:page:product1:timeSpan:anytime rab:set:action:buy:page:product2:timeSpan:anytime

Customer Retention (customers who bought on two different dates):

 SMEMBERS rab:set:custom:retention-buy:timeSpan:anytime
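
The drop-off figure mentioned above reduces to two BITCOUNT calls; a minimal sketch (node-redis v4):

 // Drop-off: the share of registered users who went on to buy (order matters).
 async function dropOff(client, timeSpan) {
   const registered = await client.bitCount(`rab:bitmap:action:register:timeSpan:${timeSpan}`);
   const registeredThenBought = await client.bitCount(`rab:bitmap:custom:cohort-buy:timeSpan:${timeSpan}`);
   return registered === 0 ? 0 : (registeredThenBought / registered) * 100; // percent
 }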


How to Build a Real-Time Bidding Platform using NodeJS, AWS Lambda and Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Digital technology has propelled us forward to an exciting new era and has transformed almost every aspect of life. We’re more interconnected than ever as communication has become instant. Working from home has now become the norm, helping us pivot to a new way of working during the pandemic. And our ability to reduce carbon emissions by attending work-related events online has meant that we’ve taken greater strides to combat global warming. Continuing this trend is Shamshir Anees and his team, who have created an application that can host digital auctions. By using Redis, data transmission between components was carried out with maximum efficiency, providing users with real-time bidding updates on the dashboard.

Let’s take a look at how this was achieved. We’d also like to point out that we have a diverse range of exciting applications for you to check out on the Redis Launchpad.

  • What will you build?
  • What will you need?
  • Architecture
  • How does it work?
  • Getting started
  • How data is stored
  • Navigating the application

What will you build?

You’ll build an application that will allow users to attend and take part in digital auctions. The application will allow users to create an account, put in bids, and even set up their own auction. Below we’ll uncover the required components, their functionality, and how to deploy them within this architecture.

Ready to get started? Ok, let’s dive straight in.

What will you need?

  • NodeJS: used as an open-source, cross-platform, backend JavaScript runtime environment that executes JavaScript code outside a web browser.
  • Amazon Cognito: used to securely manage and synchronize app data for users on mobile.
  • Redis Cloud: used as a real-time database, cache, and message broker.
  • Redis Stack: used to store, update and fetch JSON values from Redis.
  • Socket.IO: used as a library that provides real-time, bi-directional, and event-based communication between the browser and the server.
  • AWS Lambda: used as a serverless compute service that runs your code in response to events and automatically manages the underlying compute resources for you.
  • Amazon SNS/Amazon SES: a fully managed messaging service for both application-to-application (A2A) and application-to-person (A2P) communication.

Architecture

My Image

How does it work?

All auctions

NodeJS connects to the Redis Enterprise Cloud database.

The frontend then communicates with the NodeJS backend through API calls.

GET : /api/auctions fetches all the keys from the Auctions hash.

NodeJS uses the Redis module to work with Redis Enterprise Cloud. The Redis client is created using the Redis credentials, and the data is read with hmget(), the NodeJS equivalent of the HMGET command, which fetches fields from a Redis hash.

Each auction

GET : /api/auctions/{auctionId} fetches each auction item by id.

NodeJS uses the Redis module to work with Redis Cloud. As above, the auction data is read with hmget() (HMGET).

All bidding data of an auction item

GET : /api/bidding/{auctionId}

NodeJS uses the Redis module to work with Redis Cloud. As above, the bidding data is read with hmget() (HMGET).

Profile settings

GET : /api/settings

NodeJS uses the Redis module to work with Redis Cloud. As above, the profile settings are read with hmget() (HMGET).

User info

GET : /api/users/{email}

NodeJS uses the Redis module to work with Redis Cloud. As above, the user data is read with hmget() (HMGET); a sketch of this read pattern follows.
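
As an illustration of that read pattern, a user-info endpoint might look roughly like the sketch below, using Express and node-redis v4 (whose camelCase hmGet corresponds to the HMGET command). The route shape comes from the article; the field names and setup are hypothetical:

 const express = require('express');
 const { createClient } = require('redis');

 const app = express();
 const client = createClient({ url: process.env.REDIS_ENDPOINT_URI });

 // GET /api/users/:email: the user hash is keyed by the email address,
 // so we read a few of its fields with HMGET (field names are hypothetical).
 app.get('/api/users/:email', async (req, res) => {
   const [username, fullName] = await client.hmGet(req.params.email, ['username', 'fullName']);
   res.json({ email: req.params.email, username, fullName });
 });

 client.connect().then(() => app.listen(3000));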

Getting started

Prerequisites

Step 1. Sign up for a Free Redis Enterprise Cloud Account

Follow this tutorial to sign up for a free Redis Enterprise Cloud account.

image

Choose AWS as a Cloud vendor while creating your new subscription. At the end of the database creation process, you will get a Redis Enterprise Cloud database endpoint and password. Save them for later use.

image

Step 2. Clone the backend GitHub repository

https://github.com/redis-developer/NR-digital-auction-backend

Step 3. Install the package dependencies

npm install is an npm CLI command that installs the dependencies specified in package.json.

npm install

Step 4. Setting up environment variables

export REDIS_END_POINT=XXXX
export REDIS_PASSWORD=XXX
export REDIS_PORT=XXX

Step 5. Building the application

npm run build

Step 6. Starting the application

npm start

Step 7. Cloning the frontend GitHub repository

git clone https://github.com/redis-developer/NR-digital-auction-frontend

Step 8. Building the application

npm run build

Step 9. Starting the application

npm start

Step 10. Accessing the application

accessing

Step 11. Signing up to the application

signing

Step 12. Sign-in

sign

Step 13. Accessing the dashboard

access

Step 14. Listing the auction item

Listing

Step 15. Accessing the bidding page

accessing

How data is stored

The Redis Enterprise Cloud database with Redis Stack is what you’ll use to store the data.

Auctions

  • Type - Redis Hash.
  • Used for storing auctions data.
  • A UUID generated by the backend (NodeJS) serves as the key.
  • JSON data representing the auction object serves as the value and includes the following keys:
    • auctionId
    • auctionItemName
    • description
    • lotNo
    • quantity
    • buyersPremium
    • itemUnit
    • minBidAmount
    • bidIncrement
    • startDateTime
    • endDateTime
    • images
    • currentBid
  • NodeJS connects to the Redis Cloud database. The Frontend communicates with the NodeJS backend through API calls.
  • POST : /api/auctions.
  • The request body has JSON data to be inserted into the database.
  • NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials, and the data is written with hmset(), the NodeJS equivalent of the HMSET command that’s used to push data to the Redis database.

Biddings

  • Type - Redis Hash

  • Used for storing the bids placed on each auction item

  • NodeJS connects to the Redis Cloud database. The Frontend communicates with the NodeJS backend through API calls.

  • POST : /api/bidding

  • The request body has JSON data to be inserted into the database.

  • The auctionId from the request body serves as the key

  • JSON data which includes the following keys:

    • currentBid
    • currentBidTime
    • currentBidEndTime, and
    • biddings array (id, auctionId, userId, username, bidAmount, bidTime)
  • The biddings array holds all of the bids placed for a particular auction item.

  • Based on currentBidEndTime and bidTime, the auction end time may be extended according to the dynamic closing concept.

  • Current dynamic closing logic - if a new bid is placed within the last 5 minutes of the auction end time, the end time is extended by 1 hour (see the sketch after this list).

  • This will be configurable in the SaaS solution.

  • NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials, and the data is written with hmset(), the NodeJS equivalent of the HMSET command that’s used to push data to the Redis database.
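
As a sketch of that dynamic closing rule in plain NodeJS (the real project may implement it differently):

 const FIVE_MINUTES = 5 * 60 * 1000;
 const ONE_HOUR = 60 * 60 * 1000;

 // Given the current end time and the moment a new bid arrives, return
 // the (possibly extended) end time per the dynamic closing rule.
 function dynamicClose(currentBidEndTime, bidTime) {
   const end = new Date(currentBidEndTime).getTime();
   const bid = new Date(bidTime).getTime();
   if (bid < end && end - bid <= FIVE_MINUTES) {
     return new Date(end + ONE_HOUR).toISOString(); // extend by 1 hour
   }
   return currentBidEndTime; // unchanged
 }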

Profile Settings

  • Type - string
  • JSON data, which includes the relevant keys, serves as the value
  • NodeJS connects to the Redis Cloud database. The frontend communicates with the NodeJS backend through API calls.
  • POST : /api/settings
  • The request body has JSON data to be inserted into the database.
  • NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials and hmset(). This is the equivalent of the HMSET command that’s used to push data to the Redis database.
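Since the settings are stored as a plain string value holding serialized JSON, a minimal sketch looks like this (Python with redis-py for illustration; the key name and fields are hypothetical):

import json

import redis

r = redis.Redis(decode_responses=True)
settings = {"displayName": "alice", "notifications": True}  # hypothetical fields
r.set("settings:alice@example.com", json.dumps(settings))  # JSON string as the value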

Users

  • Type - Redis Hash
  • Used for storing user details
  • NodeJS connects to the Redis Cloud database. The Frontend communicates with the NodeJS backend through API calls.
  • POST : /api/users
  • The request body has JSON data to be inserted into the database
  • The email id serves as the key
  • JSON data of the user details serves as the value
  • NodeJS uses the Redis module to work with Redis Cloud. The Redis client is created using the Redis credentials and hmset(). This is the equivalent of the HMSET command that’s used to push data to the Redis database.

Creating an account

When you go onto the Digital Auction’s homepage, you’ll come across a range of items that are to be auctioned (see below). Click on the ‘Welcome’ button to create an account.

creating

You’ll then be taken to the sign-up page. Enter your details and click ‘sign-up.’ Once you’ve completed the sign-up form, you’ll receive a confirmation email to activate your account.

Placing a bid

Go to the homepage to have access to view all of the items and their auction details. All of the data here is being populated by Redis Stack and Redis Cloud. Scroll through the page and click on the item that you want to place a bid for.

placing

When you click on an item, you’ll see the details for the bidding process at the top of the page. You’ll also have the option to set a reminder: you’ll receive an email whenever the bidding process for this item begins.

On the right-hand side of the image, you’ll see the highest bid that’s been placed for this item. Below is a list of previous bids made by different users which are updated in real-time.

Click on the ‘Place Bid’ button to make a bid.

To access the meta-data information or view more images of the item, simply scroll down the page (see below).

placebid

Viewing your Bidding History

Click on ‘My biddings’ at the top of the navigation bar to view your bidding history (see below).

view

Viewing upcoming auctions

Click on ‘Auctions’ at the top of the navigation bar to view all upcoming auctions.

auction

Conclusion: Leveraging Redis and AWS to Empower Auctioneers with Real-time Data

Digital technology has had a ripple effect across all aspects of modern life. The ability to complete important tasks online instead of in person has revolutionized the way we live, helping us to reduce carbon emissions, save time spent traveling, and gain instant access to reams of data that we never had before.

However, the success of such events hinges on a database’s ability to transmit data in real-time. Any blips in transmission would create a disconnect between users and the auction, impeding auctioneers’ reactions to bids. This would only result in frustration, disengagement, and a complete divorce of users from the application.

But thanks to Redis, the components that make up the architecture are tightly interconnected, so data can be sent, processed, and received in real time. This paves the way for a smooth bidding process where users can interact with events in real time without interruptions, ultimately enhancing the functionality of the app.

NR-Digital-Auction is a fantastic example of how innovations can be brought to life by using Redis. Every day, programmers around the world are experimenting with Redis to build applications that are impacting everyday life, and you can too!

So what can you build with Redis? For more inspiration, you can head over to the Redis Launchpad to access an exciting range of applications. If you're ready to get started building, quickly spin up a free database on Redis Enterprise Cloud.

How to Build a Real Time Chat application on Amazon Web Services using Python and Redis

User data is stored in a hash set, where each user entry contains the following values:

  • username: unique user name;

  • password: hashed password

  • Additionally, a set of rooms is associated with each user

  • Rooms are sorted sets that contain messages, where the score is the timestamp of each message

  • Each room has a name associated with it

  • The online set is global for all users and is used to keep track of which users are online

  • The user hash set is accessed by the key user:{userId}. Its data is stored with HSET key field data. The user id is calculated by incrementing the total_users key (INCR total_users)

  • The username is stored as a separate key (username:{username}) that returns the userId for quicker access; it is stored with SET username:{username} {userId}

  • The rooms a user belongs to are stored at user:{userId}:rooms as a set of room ids. A room is added with the SADD user:{userId}:rooms {roomId} command

  • Messages are stored at the room:{roomId} key in a sorted set (as mentioned above). They are added with the ZADD room:{roomId} {timestamp} {message} command, and each message is serialized to an app-specific JSON string (see the sketch after this list)
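A minimal sketch of these keys with redis-py, following the naming conventions above (the user and message values are hypothetical):

import json
import time

import redis

r = redis.Redis(decode_responses=True)

username = "alice"  # hypothetical user
user_id = r.incr("total_users")  # INCR total_users
r.set(f"username:{username}", user_id)  # SET username:{username} {userId}
r.hset(f"user:{user_id}", mapping={"username": username, "password": "<hashed>"})
r.sadd(f"user:{user_id}:rooms", "1")  # SADD user:{userId}:rooms {roomId}

message = json.dumps({"from": user_id, "text": "hello"})  # app-specific JSON string
r.zadd("room:1", {message: int(time.time())})  # ZADD room:{roomId} {timestamp} {message}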

Step 9. How is the data accessed?

Get a user: HGETALL user:{id}. Example:

HGETALL user:2

This gets the data for the user with id 2.

  • Online users: SMEMBERS online_users. This will return the ids of users who are online.

  • Get the room ids of a user: SMEMBERS user:{id}:rooms. Example:

 SMEMBERS user:2:rooms

This will return the IDs of the rooms for the user with ID 2.

  • Get a list of messages: ZREVRANGE room:{roomId} {start} {stop}. Example:
 ZREVRANGE room:1:2 0 50

This returns the most recent messages (offsets 0 through 50), newest first, from the private room between the users with IDs 1 and 2.
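The same reads expressed with redis-py, as a hedged sketch:

import redis

r = redis.Redis(decode_responses=True)

user = r.hgetall("user:2")  # HGETALL user:2
online_ids = r.smembers("online_users")  # SMEMBERS online_users
room_ids = r.smembers("user:2:rooms")  # SMEMBERS user:2:rooms
messages = r.zrevrange("room:1:2", 0, 50)  # ZREVRANGE room:1:2 0 50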

Further References


How to migrate your database from AWS ElastiCache to Redis without any downtime


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Most of the database migration tools available today are offline in nature. They are complex and require manual intervention.

If you want to migrate your data from Amazon ElastiCache to Redis Enterprise Cloud, for example, the usual process is to back up your ElastiCache data to an Amazon S3 bucket and then import your data using the Redis Enterprise Cloud UI. This process can require painful downtime and could result in data loss. Other available techniques include creating point-in-time snapshots of the source Redis server and applying the changes to the destination servers to keep both the servers in sync. That might sound like a good approach, but it can be challenging when you have to maintain dozens of scripts to implement the migration strategy.

So we’ve come up with a different approach:

Introducing RIOT

image

RIOT is an open source online migration tool built by Julien Ruaux, a Solution Architect at Redis. RIOT implements client-side replication using a producer/consumer approach. The producer is the combination of the key and value readers that have a connection to ElastiCache. The key reader component identifies keys to be replicated using scan and keyspace notifications. For each key, the value reader component performs a DUMP and hands the resulting key and serialized value to the consumer (writer), which performs a RESTORE on the Redis Enterprise connection.
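To make the producer/consumer idea concrete, here is a minimal sketch of client-side DUMP/RESTORE replication with redis-py. This only illustrates the concept, it is not RIOT's implementation, and the connection URLs are placeholders:

import redis

src = redis.Redis.from_url("redis://localhost:6379")  # source (ElastiCache)
dst = redis.Redis.from_url("redis://localhost:6380")  # target (Redis Enterprise)

for key in src.scan_iter():  # key reader: SCAN over the source keyspace
    payload = src.dump(key)  # value reader: DUMP the key's serialized bytes
    if payload is not None:
        dst.restore(key, 0, payload, replace=True)  # writer: RESTORE on the target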

This blog post will show how to perform a seamless online migration of databases from ElastiCache to Redis Enterprise Cloud.

Prerequisites:

You will require a few resources to use the migration tool:

  • A Redis Enterprise Cloud subscription, sign up here
  • Amazon ElastiCache (a primary endpoint in the case of a single-master EC and a configuration endpoint in the case of a clustered EC: Refer to Finding Connection Endpoints on the ElastiCache documentation to learn more)
  • An Amazon EC2 instance based on Linux

Step 1 - Setting up an Amazon EC2 instance

You can either create a new EC2 instance or leverage an existing one. In our example, we will first create an instance on Amazon Web Services (AWS). The most common scenario is to access an ElastiCache cluster from an Amazon EC2 instance in the same Amazon Virtual Private Cloud (Amazon VPC). We have used Ubuntu 16.04 LTS for this setup, but you can choose the Ubuntu or Debian distribution of your choice.

Use SSH to connect to this new EC2 instance from your computer as shown here:

ssh -i "<private key file>" <AWS EC2 instance>

Step 2 - Install the redis-cli tool

$ sudo apt update
$ sudo apt install -y redis-tools

Verify the connectivity with the ElastiCache database

Syntax:

$ redis-cli -h <ElastiCache primary endpoint> -p 6379

Command:

$ sudo redis-cli -h <elasticache primary endpoint> -p 6379

Ensure that the above command allows you to connect to the remote Redis database successfully.

Step 3 - Using the RIOT migration tool

Run the commands below to set up the migration tool.

Prerequisites:

Install Java

We recommend using OpenJDK 11 or later:

sudo add-apt-repository ppa:openjdk-r/ppa && sudo apt-get update -q && sudo apt install -y openjdk-11-jdk

Installing RIOT

Download the package, unzip it, and make sure the RIOT binaries are in place, as shown here:

wget https://github.com/Redislabs-Solution-Architects/riot/releases/download/v2.0.8/riot-redis-2.0.8.zip
unzip riot-redis-2.0.8.zip
cd riot-redis-2.0.8/bin/

You can check the version of RIOT by running the command below:

./riot-redis --version
RIOT version "2.0.8"
bin/riot-redis --help
Usage: riot-redis [OPTIONS] [COMMAND]
-q, --quiet Log errors only
-d, --debug Log in debug mode (includes normal stacktrace)
-i, --info Set log level to info
-h, --help Show this help message and exit.
-V, --version Print version information and exit.
Redis connection options
-r, --redis=<uri> Redis connection string (default: redis://localhost:6379)
-c, --cluster Connect to a Redis Cluster
-m, --metrics Show metrics
-p, --pool=<int> Max pool connections (default: 8)
Commands:
replicate, r Replicate a source Redis database in a target Redis database
info, i Display INFO command output
latency, l Calculate latency stats
ping, p Execute PING command

Once Java and RIOT are installed, we are all set to begin the migration process with the command below, which replicates data directly from the source (ElastiCache) to the target (Redis Enterprise Cloud).

Step 4 - Migrate the data

Finally, it’s time to replicate the data from ElastiCache to Redis Enterprise Cloud by running the following command:

sudo ./riot-redis -r redis://<source Elasticache endpoint>:6379 replicate -r redis://password@<Redis Enterprise Cloud endpoint>:port --live

ElastiCache can be configured in two ways: clustered and non-clustered. For a non-clustered ElastiCache, use the replicate command exactly as shown above. For a clustered ElastiCache, you need to pass the --cluster option before specifying the source ElastiCache endpoint:

sudo ./riot-redis --cluster -r redis://<source ElastiCache configuration endpoint>:6379 replicate -r redis://password@<Redis Enterprise Cloud endpoint>:port --live

Important notes

  • Perform user acceptance testing of the migration before using it in production.
  • Once the migration is complete, ensure that application traffic gets successfully redirected to the Redis Enterprise endpoint.
  • Perform the migration process during a period of low traffic to minimize the chance of data loss.

Conclusion

If you’re looking for a simple and easy-to-use live migration tool that can help you move data from Amazon ElastiCache to Redis Enterprise Cloud with no downtime, RIOT is a promising option.


Overview

The following links provide you with the available options to run apps on AWS using Redis:

Create a Redis database on AWS
Building a Slack Bot to Retrieve Lost Files Using AWS S3 and Search
Using Terraform to Deploy and Manage Redis Database on AWS
Learn how to build a Real-Time Bidding Platform using NodeJS, AWS Lambda and Redis
Migrating Your Database from AWS ElastiCache to Redis using the RIOT tool
Building a Real-Time Chat application on AWS using Flask and Redis
Learn how to Build and Deploy Your Own Analytics Dashboard using NodeJS, AWS Lambda and Redis

Create Redis database on AWS


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Redis Enterprise Cloud on AWS is fully managed Redis Enterprise as a service. Designed for modern distributed applications, Redis Enterprise Cloud on AWS is known for its high performance, infinite scalability, and true high availability.

Follow the steps below to set up Redis Enterprise Cloud hosted on AWS:

Step 1. Create free cloud account

Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

tip

For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

🎉 Click here to sign up

My Image

Step 2. Create Your subscription

Next, you will have to create a Redis Enterprise Cloud subscription. In the Redis Enterprise Cloud menu, click "Create your Subscription".

My Image

Step 3. Select the right Subscription Plan

Select "Fixed Plan" for low throughout application as for now.

My Image

Step 4. Select cloud vendor

For the cloud provider, select your preferred cloud (for demo purposes).

My Image

Step 5. Click "Create Subscription"

Finally, click on "Create Subscription" button.

My Image

You can now verify the subscription as shown below:

My Image

Step 6. Create database

Click "Create Database". Enter database name and your preferred module.

My Image

Step 7. Launch database

Click "Activate" and wait for few seconds till it gets activated. Once fully activated, you will see the database endpoints as shown below:

My Image

Redis Launchpad

How to Build a Slack Bot to Retrieve Lost Files Using AWS S3 and Redis Search and Query Engine


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

alt_text

If you work remotely then you’re likely to have come across Slack at some point. And if you use Slack on a daily basis, then you’ll be all too aware of how easy it can be to lose files. Being pinged every day by different employees across different channels makes it difficult to keep track of files.

Eventually, you may be forced to rummage through a cluttered library of documents in search of that one crucial document that’s needed for a task, report or even a meeting. We’ve all been there, and the frustration is just as real as it is irritating... which is why this Launchpad app was created to remove this impediment.

It was a tricky application to make, requiring a lot of attention to detail across a number of different components for it to come to fruition. However, the success of this application was possible due to Redis’ ability to extract and process data proficiently.

Thanks to Redis, all of the components functioned harmoniously with one another, creating a fully optimal application that interacts with users in real-time.

Let’s take a look at how this was done. Before we dive in, we’d also like to point out that we have a variety of exciting applications for you to check out in our Redis Launchpad.

alt_text

  1. What will you build?
  2. What will you need?
  3. Architecture overview
  4. Getting started
  5. How it works

Step 1. What will you build?

You’ll build a special search engine that’s designed to retrieve lost files in Slack. This is especially handy for people who work from home, where documents between channels are easily lost between employees.

Below we’ll go through each step chronologically and highlight what components are required to create this application.

Ready to get started? OK, let’s dive straight in.

Step 2. What will you need?

  • Slack: used as an instant messaging app that connects employees with one another.
  • Slack Block Kit: used as a UI framework for Slack apps that offers a balance of control and flexibility when building experiences.
  • Python: the preferred programming language to connect Redis in the application.
  • Redis Stack: includes a built-in Search and Query feature that provides querying, secondary indexing and full-text search.
  • S3 bucket: used as a public cloud storage resource in Amazon Web Services (AWS).
  • AWS Textract: used as a machine learning service that automatically extracts text.
  • Nodejs: responsible for image generation.

Step 3. Architecture

Let’s look at each of the components that create the Reeko-Slack bot:

alt_text

1. file_shared

  • When a new file is shared in any public Slack channel, the file_share event is sent to the Slack bot app.
  • The file name is added as a suggestion using the FT.SUGADD command in Redis.
  • All file data is added using the JSON.SET command (see the sketch below).
  • The file is then stored on the S3 bucket as an object with the key as the filename.
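A hedged sketch of these two writes with the redisearch and rejson Python clients used later in this tutorial (the file name and JSON payload are hypothetical):

from redisearch import AutoCompleter, Suggestion
from rejson import Client, Path

ac = AutoCompleter('file_index')
rj = Client(decode_responses=True)

file_name = 'amazon-shareholder-letter.pdf'
ac.add_suggestions(Suggestion(file_name))  # FT.SUGADD under the hood
rj.jsonset('amazonshareholderletterpdf', Path.rootPath(),
           {'file_name': file_name, 'mimetype': 'application/pdf'})  # JSON.SET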

2. S3-get

  • The JSON.GET command checks whether the desired file exists.
  • The file will then be retrieved from the S3 bucket if found.

3. s3-search

  • The FT.SEARCH command uses the Redis Search and Query engine to look for documents in the S3 bucket. Users are prompted with different file name suggestions based on what they’ve typed in the search bar.
  • Once the user chooses one of the file suggestions, it is downloaded and sent back to Slack.

4. S3-delete

  • The file name is taken from the command['text'] parameter
  • The file data is deleted from Redis using the JSON.DEL command and is also removed from Redis's suggestions using the FT.SUGDEL command.

5. Summarise-document

  • The file name is identified from the command['text'] parameter.
  • It is then retrieved from the S3 bucket through the JSON.GET command.
  • Users can either download the pdf or png file locally from the S3 bucket.
  • The text is extracted using AWS Textract.
  • The extracted text is then summarised using the Hugging Face transformers summarization pipeline. The text summary is also added back to the JSON document using the JSON.SET command.
  • A post request is then sent to /create-image on the NodeJS backend with the file name and summary text.
  • An image is generated using a base template.
  • The image that is returned is saved to the S3 bucket and sent back to Slack.
  • The image URL is also added to the JSON document using the JSON.SET command.

What is the S3 bucket?

The S3 bucket is a simple storage service from Amazon. It allows users to store objects through a web service interface. The product’s value comes from its ability to store, protect and retrieve data from ‘buckets’ at any time from anywhere and on any device.

Step 4. Getting started

Prerequisites

  • Python 3.6+
  • ngrok
  • AWS Account
  • Slack
  • Docker

1. Run Redis Docker container

This simple container image bundles together the latest stable releases of Redis and select Redis modules from Redis Labs. This image is based on the official image of Redis from Docker. By default, the container starts with Redis' default configuration and all included modules loaded.

 docker run -d -p 6379:6379 redis/redis-stack

2. Setup a Python environment

To test the integration, Python needs to be installed on your computer. You can get a suitable release from here. To check your Python version, follow the command below.

 # Python 3.6+ required
git clone https://github.com/redis-developer/Reeko-Slack-Bot
cd Reeko-Slack-Bot
python3 -m venv env
source env/bin/activate
cd python-backend
pip3 install -r requirements.txt

3. Using ngrok as a local proxy

To develop locally we'll be using ngrok. This will allow you to expose a public endpoint that Slack can use to send your app events. If you haven't already, install ngrok from their website.

ngrok exposes local networked services behind NATs and firewalls to the public internet over a secure tunnel, letting you share local websites, build and test webhook consumers, and self-host personal services.

4. Setting up an AWS Account

For testing you need a verified AWS account. Your credentials file is at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users); copy the following lines into the .env file.

Also make sure to add your S3 bucket name in the .env file.

 AWS_ACCESS_KEY_ID="YOUR_ACCESS_KEY_ID"
AWS_SECRET_ACCESS_KEY="YOUR_SECRET_ACCESS_KEY"
BUCKET_NAME="YOUR_BUCKET_NAME"

5. Install Slack on your local system

Slack needs to be installed on your computer. If it hasn’t been installed, you can get it from here for Windows or Mac. If you don’t already have an account you can make one here.

To get started, you'll need to create a new Slack app by clicking on the following link: https://api.slack.com/apps. The foundational framework we’ll be using is Bolt. This will make it easier to build Slack apps with the platform’s latest features.

  1. Click on the Create an App button
  2. Name the application ‘Reeko’ and choose the development workspace.

alt_text

alt_text

  3. Requesting scopes - Scopes give your app permission to do things (for example, post messages) in your development workspace. You can select the scopes to add to your app by navigating over to the OAuth & Permissions sidebar.
  4. Add the Bot Token Scopes by clicking on the Add an OAuth Scope button:

OAuth Scope - Description
  • channels:history - View messages and other content in public channels that reeko has been added to
  • channels:join - Join public channels in a workspace
  • channels:read - View basic information about public channels in a workspace
  • chat:write - Send messages as @reeko
  • chat:write.customize - Send messages as @reeko with a customized username and avatar
  • chat:write.public - Send messages to channels @reeko isn't a member of
  • files:read - View files shared in channels and conversations that reeko has been added to
  • files:write - Upload, edit, and delete files as reeko
  5. Add the User Token Scopes by clicking on the Add an OAuth Scope button:

OAuth Scope - Description
  • channels:history - View messages and other content in public channels that Reeko has been added to.
  • files:read - View files shared in channels and conversations that Reeko has been added to.
  6. Install your own app by selecting the Install App button at the top of the OAuth & Permissions page, or from the sidebar.
  7. After clicking through one more green Install App To Workspace button, you'll be sent through the Slack OAuth UI.
  8. After installation, you'll land back on the OAuth & Permissions page and find a Bot User OAuth Access Token and a User OAuth Token. Click on the copy button for each of them. These tokens need to be added to the .env file. (The bot token starts with xoxb, whereas the user token is longer and starts with xoxp.)
SLACK_USER_TOKEN=xoxp-your-user-token
SLACK_BOT_TOKEN=xoxb-your-bot-token

alt_text

  9. As well as the access token, you'll need a signing secret. Your app's signing secret verifies that incoming requests are coming from Slack. Navigate to the Basic Information page from your app management page. Under App Credentials, copy the value for Signing Secret and add it to the .env file.
SLACK_SIGNING_SECRET=your-signing-secret

alt_text

  10. Make sure you have followed the steps in Cloning the repo to start the Bolt app. The HTTP server uses a built-in development adapter, which is responsible for handling and parsing incoming events from Slack on port 3000.
python3 app.py

alt_text

Open a new terminal and ensure that you've installed ngrok. Make sure to tell ngrok to use port 3000 (which Bolt for Python uses by default):

ngrok http 3000

alt_text

For local Slack development, we'll use your ngrok URL from above, so copy it to your clipboard.

https://your-own-url.ngrok.io
  11. Now we’re going to subscribe to events. Your app can listen to all sorts of events that are happening around your workspace - messages being posted, files being shared and more. On your app configuration page, select the Event Subscriptions sidebar. You'll be presented with an input box to enter a Request URL, which is where Slack sends the events your app is subscribed to. Hit the save button.

By default Bolt for Python listens for all incoming requests at the /slack/events route, so for the Request URL you can enter your ngrok URL appended with /slack/events:

https://your-own-url.ngrok.io/slack/events

If the challenge was successful, you’ll get “verified” right next to the Request URL.

alt_text

On the same page click on the Subscribe to bot events menu that sits at the bottom of the page. Click on the Add Bot User Event.

Similarly, click on Subscribe to events on behalf of the user but then click on Add Workspace Event.

Add the following scopes

Event Name - Description - Required Scope
  • file_share - A file was shared - files:read
  • message.channels - A message was posted to a channel - channels:history

alt_text

  12. Select the Interactivity & Shortcuts sidebar and toggle the switch on. Again, for the Request URL, enter your ngrok URL appended with /slack/events:
https://your-own-url.ngrok.io/slack/events

alt_text

  13. Scroll down to the Select Menus section in the Options Load URL and enter your ngrok URL appended with /slack/events:
https://your-own-url.ngrok.io/slack/events

alt_text

  14. Finally we come to the slash commands. Slack's custom slash commands perform a very simple task. First they take whatever text you enter after the command itself (along with some other predefined values). Next, they’ll send it to a URL and accept whatever the script returns. After this, Slack will post it as a Slackbot message to the person who issued the command. We have four slash commands to add in the workspace.

Visit the Slash Commands sidebar and click on the Create New Command button to head over the Create New Command page. Add the Command, Request URL, Short Description and Usage hint, according to the table provided below.

Click on Save to return to the Slash Commands.

Command - Request URL - Short Description - Usage Hint
  • /s3-get - https://your-own-url.ngrok.io/slack/events - Get a file from the S3 bucket - filename
  • /s3-search - https://your-own-url.ngrok.io/slack/events - Search for a file in S3
  • /s3-delete - https://your-own-url.ngrok.io/slack/events - Deletes the given file from the S3 bucket - filename
  • /summarise-document - https://your-own-url.ngrok.io/slack/events - Summarise a document - filename

alt_text

alt_text

alt_text

alt_text

alt_text

  15. Open the Slack channel and upload a file in any channel. Make sure to note the file name.

6. Setting up a NodeJS backend

Requirements

Getting Started:

alt_text

GraphicsMagick

GraphicsMagick is a highly versatile piece of software used for image processing. To generate images, you need to have GraphicsMagick installed on your machine.

You can find the suitable release from http://www.graphicsmagick.org/download.html#download-sites.

alt_text

Nodejs

note

Please follow all the steps in python-backend/README.md first.

Copy the AWS credentials from the python-backend/.env to the config.json file.

{
  "accessKeyId": "",
  "secretAccessKey": "",
  "region": ""
}

Install all the packages and run the server.

npm install
npm start

alt_text

7: Connecting Slack to S3

This step involves bringing your AWS S3 bucket to your Slack account. Doing this will allow you to upload, download or delete files from your workspace without writing a single line of code.

/s3-get filename

The purpose of this command is to retrieve a specified file from the S3 bucket. Once you type in the name of the file in the search bar, Reeko will check whether this document exists. If the document doesn’t exist then it will return as false and nothing will be done.

If the file is found, then the JSON.GET command will capture its name and download it from the S3 bucket. The downloaded file is sent back as a direct message in Slack.

JSON.GET amazonshareholderletterpdf

alt_text

/s3-delete filename

This command involves deleting files from the S3 bucket. To achieve this you simply need to type in the file name in the search bar and Reeko will pull up the file as demonstrated below.

You’ll have the option to permanently delete the file from the S3 bucket. The file data is deleted from Redis using the JSON.DEL command and is removed from Search suggestions using the FT.SUGDEL command. You’ll be informed when the file is deleted.

FT.SUGDEL file-index "amazon-shareholder-letter.pdf"

JSON.DEL amazonshareholderletterpdf

Step 8: File searching

Have you ever searched for a file without being entirely sure what it is you’re looking for? You may remember snippets of the content but not enough to manually track down its location. Well due to Search’s autocomplete functionality this will no longer be a problem.

This command first opens up a modal inside Slack with a search bar. Different file names will then be suggested based on the text you’ve typed. The way this works is simple:

Let’s assume the bucket has documents called abc.csv, abcd.csv and abcdef.csv. If you type abc into the search bar, you’ll get these three results as a list from the FT.SEARCH command. From here you’ll be able to select the file you’re looking for. Once selected, the file is downloaded and sent back to Slack.

FT.SEARCH file-index "ama"
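The suggestion list itself can be fetched through the autocompleter. A hedged sketch with the redisearch Python client used elsewhere in this tutorial (the prefix and index name are illustrative):

from redisearch import AutoCompleter

ac = AutoCompleter('file_index')
for suggestion in ac.get_suggestions('ama', num=5):  # FT.SUGGET under the hood
    print(suggestion.string)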

Step 9: Document summarization

In this step, Reeko will extract all of the text from the documents and summarize the content of each one with an image. This will prevent you from having to tediously open each document to get access to important information. Here’s how to do it:

  1. Get the file name from the command['text'] parameter.
  2. If the file is found, you can get the file's name by using the JSON.GET command.
 JSON.GET amazonshareholderletterpdf
  3. Download the pdf or png file locally from the S3 bucket.
  4. Extract the text using AWS Textract.
  5. The extracted text is summarised using the Hugging Face transformers summarisation pipeline. The text summary is also added back to the JSON document using the JSON.SET command.
 JSON.SET amazonshareholderletterpdf .summary ' Amazon has grown from having 158 employees to 614. We had just gone public at a split-adjusted stock price of $1.50 per share. In 1997, we hadn’t invented prime, marketplace, alexa, or aws. If you want to be successful in business, you have to create more than you consume. Your goal should be to create value for everyone you interact with. Stock prices are not about the past. They are a prediction of future cash flows discounted back to the present.'
  6. A post request is then sent to /create-image on the NodeJS backend with the file name and summary text.
  7. An image is generated using a base template.
  8. The image that is returned is saved to the S3 bucket and sent back to Slack.
  9. The image URL is also added to the JSON document using the JSON.SET command.
 JSON.SET amazonshareholderletterpdf .file_path 'https://bucket-1234.s3.amazonaws.com/b8bac45f-7f69-4c28-a26e-9888d9771bed-image.png'

Below we’ve used the Amazon 2020 shareholder letter as an example of how a document can be summarized using Reeko.

5. How it works

The Slack app is built using Bolt for Python framework. To connect the AWS S3 bucket and AWS Textract, use their respective boto3 clients.

Slack is receptive to all events around your workspace such as messages being posted, files being shared, users joining the team, and more. To listen to events, Slack uses the Events API. And to enable custom interactivity, you can use the Block Kit.

Slash commands work in the following way: first they take the text you enter after the command itself and send it to a URL; they then accept whatever the script returns and post it as a Slackbot message to the person who issued the command. A set of four slash commands makes up the Slack bot.

In the application there are two Redis modules: Search (the redisearch client) and JSON (the rejson client).

The code below is used to initialize the Search client in redisearch_connector.py, with a client and autocompleter for the index named file_index.

from redisearch import Client, TextField, AutoCompleter, Suggestion

class RedisSearchConnector():
    def __init__(self):
        self.index_name = 'file_index'
        self.client = Client(self.index_name)
        self.ac = AutoCompleter(self.index_name)

Use the following code to initialise Redis JSON in redisjson_connector.py.

from rejson import Client, Path

class RedisJsonConnector():
    def __init__(self):
        self.rj = Client(decode_responses=True)

And the code below is used to create an index in Redis

FT.CREATE file-index ON HASH SCHEMA file_name TEXT SORTABLE file_id TEXT created TEXT timestamp TEXT mimetype TEXT filetype TEXT user_id TEXT size
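For reference, a hedged sketch of how the same index could be created from Python with the redisearch client shown above; the field list mirrors the FT.CREATE command, but this block is an illustration rather than code from the project:

from redisearch import Client, TextField

client = Client('file-index')
client.create_index([
    TextField('file_name', sortable=True),  # file_name TEXT SORTABLE
    TextField('file_id'),
    TextField('created'),
    TextField('timestamp'),
    TextField('mimetype'),
    TextField('filetype'),
    TextField('user_id'),
    TextField('size'),
])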

Conclusion: preventing lost files with Redis

The advanced capabilities of Redis allowed this Launchpad App to create an invaluable asset to remote workers - to never lose a file again on Slack. Redis Stack offered a simple yet effective way of transmitting data to and from the S3 bucket with no lags, no pauses and no delays whatsoever. You can discover more about the ins and outs of how this app was made by simply clicking here.

Reeko is an innovative application that joins our exciting collection of apps that we currently have on the Redis Launchpad. By using Redis, programmers from all over the world are creating breakthrough applications that are having an impact on daily lives… and you can too.

So how can you change the world using Redis? For more inspiration, make sure to check out the other applications we have on our Launchpad.

alt_text

Who built this application?

Sarthak Arora

Being only 20 years old, Sarthak is a young yet highly advanced programmer who’s already a two-time international hackathon winner.

To discover more about his work and his activity on GitHub, you can check out his profile here.

References

How to Deploy and Manage Redis Database on AWS Using Terraform

Within the block body (between { and }) are query constraints defined by the data source. Most arguments in this section depend on the data source, and indeed in this example card_type and last_four_numbers are all arguments defined specifically for the rediscloud_payment_method data source.

Configure Redis Enterprise Cloud programmatic access

In order to set up authentication with the Redis Enterprise Cloud provider, a programmatic API key must be generated for Redis Enterprise Cloud. The Redis Enterprise Cloud documentation contains the most up-to-date instructions for creating and managing your key(s) and IP access.

note

Flexible and Annual Redis Enterprise Cloud subscriptions can leverage a RESTful API that permits operations against a variety of resources, including servers, services, and related infrastructure. The REST API is not supported for Fixed or Free subscriptions.

 provider "rediscloud" { } # Example resource configuration
resource "rediscloud_subscription" "example" { # ... }

Prerequisites:

  • Install Terraform on macOS.
  • Create a free Redis Enterprise Cloud account.
  • Create your first subscription.
  • Enable API

Step 1: Install Terraform on macOS

Use Homebrew to install Terraform on macOS as shown below:

 brew install terraform

Step 2: Sign up for a free Redis Enterprise Cloud account

Follow this tutorial to sign up for a free Redis Enterprise Cloud account.

Redis Cloud

Step 3: Enable Redis Enterprise Cloud API

If you have a Flexible (or Annual) Redis Enterprise Cloud subscription, you can use a REST API to manage your subscription programmatically. The Redis Cloud REST API is available only to Flexible or Annual subscriptions. It is not supported for Fixed or Free subscriptions.

For security reasons, the Redis Cloud API is disabled by default. To enable the API: Sign in to your Redis Cloud subscription as an account owner. From the menu, choose Access Management.

When the Access Management screen appears, select the API Keys tab.

Terraform

If a Copy button appears to the right of the API account key, the API is enabled. This button copies the account key to the clipboard.

If you see an Enable API button, select it to enable the API and generate your API account key.

To authenticate REST API calls, you need to combine the API account key with an API user key to make API calls.

Terraform

Step 4: Create a main.tf file

It’s time to create an empty “main.tf” file and start adding the provider, resource and data sources as shown below:

 terraform {
   required_providers {
     rediscloud = {
       source  = "RedisLabs/rediscloud"
       version = "0.2.2"
     }
   }
 }

 # Provide your credit card details
 data "rediscloud_payment_method" "card" {
   card_type         = "Visa"
   last_four_numbers = "XXXX"
 }

 # Generates a random password for the database
 resource "random_password" "passwords" {
   count   = 2
   length  = 20
   upper   = true
   lower   = true
   number  = true
   special = false
 }

 resource "rediscloud_subscription" "rahul-test-terraform" {
   name              = "rahul-test-terraform"
   payment_method_id = data.rediscloud_payment_method.card.id
   memory_storage    = "ram"

   cloud_provider {
     provider         = "AWS"
     cloud_account_id = 1

     region {
       region                       = "us-east-1"
       networking_deployment_cidr   = "10.0.0.0/24"
       preferred_availability_zones = ["us-east-1a"]
     }
   }

   database {
     name               = "db-json"
     protocol           = "redis"
     memory_limit_in_gb = 1
     replication        = true
     data_persistence   = "aof-every-1-second"

     module {
       name = "RedisJSON"
     }

     throughput_measurement_by    = "operations-per-second"
     throughput_measurement_value = 10000
     password                     = random_password.passwords[1].result
   }
 }

Step 5: Create an execution plan

The Terraform plan command creates an execution plan, which lets you preview the changes that Terraform plans to make to your infrastructure. By default, when Terraform creates a plan, it reads the current state of any already existing remote objects to make sure that Terraform state is up to date. It then compares the current configuration to the prior state and then proposes a set of change actions that should make the remote object match the configuration.

 % terraform plan


Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ create

Terraform will perform the following actions:

# random_password.passwords[0] will be created
+ resource "random_password" "passwords" {
+ id = (known after apply)
+ length = 20
+ lower = true
+ min_lower = 0
+ min_numeric = 0
+ min_special = 0
+ min_upper = 0
+ number = true
+ result = (sensitive value)
+ special = false
+ upper = true
}

# random_password.passwords[1] will be created
+ resource "random_password" "passwords" {
+ id = (known after apply)
+ length = 20
+ lower = true
+ min_lower = 0
+ min_numeric = 0
+ min_special = 0
+ min_upper = 0
+ number = true
+ result = (sensitive value)
+ special = false
+ upper = true
}

# rediscloud_subscription.rahul-test-terraform will be created
+ resource "rediscloud_subscription" "rahul-test-terraform" {
+ id = (known after apply)
+ memory_storage = "ram"
+ name = "rahul-test-terraform"
+ payment_method_id = "XXXX"
+ persistent_storage_encryption = true

+ cloud_provider {
+ cloud_account_id = "1"
+ provider = "AWS"

+ region {
+ multiple_availability_zones = false
+ networking_deployment_cidr = "10.0.0.0/24"
+ networks = (known after apply)
+ preferred_availability_zones = [
+ "us-east-1a",
]
+ region = "us-east-1"
}
}

+ database {
# At least one attribute in this block is (or was) sensitive,
# so its contents will not be displayed.
}
}

Plan: 3 to add, 0 to change, 0 to destroy.

───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────

Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now.

Step 6: Execute the action

The Terraform apply command executes the actions proposed in a Terraform plan.

 terraform apply


Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ create

Terraform will perform the following actions:

# random_password.passwords[0] will be created
+ resource "random_password" "passwords" {
+ id = (known after apply)
+ length = 20
+ lower = true
+ min_lower = 0
+ min_numeric = 0
+ min_special = 0
+ min_upper = 0
+ number = true
+ result = (sensitive value)
+ special = false
+ upper = true
}

# random_password.passwords[1] will be created
+ resource "random_password" "passwords" {
+ id = (known after apply)
+ length = 20
+ lower = true
+ min_lower = 0
+ min_numeric = 0
+ min_special = 0
+ min_upper = 0
+ number = true
+ result = (sensitive value)
+ special = false
+ upper = true
}

# rediscloud_subscription.rahul-test-terraform will be created
+ resource "rediscloud_subscription" "rahul-test-terraform" {
+ id = (known after apply)
+ memory_storage = "ram"
+ name = "rahul-test-terraform"
+ payment_method_id = "XXXX"
+ persistent_storage_encryption = true

+ cloud_provider {
+ cloud_account_id = "1"
+ provider = "AWS"

+ region {
+ multiple_availability_zones = false
+ networking_deployment_cidr = "10.0.0.0/24"
+ networks = (known after apply)
+ preferred_availability_zones = [
+ "us-east-1a",
]
+ region = "us-east-1"
}
}

+ database {
# At least one attribute in this block is (or was) sensitive,
# so its contents will not be displayed.
}
}

Plan: 3 to add, 0 to change, 0 to destroy.

Do you want to perform these actions?
Terraform will perform the actions described above.
Only 'yes' will be accepted to approve.

Enter a value: yes

random_password.passwords[0]: Creating...
random_password.passwords[1]: Creating...
random_password.passwords[1]: Creation complete after 0s [id=none]
random_password.passwords[0]: Creation complete after 0s [id=none]
rediscloud_subscription.rahul-test-terraform: Creating...
rediscloud_subscription.rahul-test-terraform: Still creating... [10s elapsed]
rediscloud_subscription.rahul-test-terraform: Still creating... [20s elapsed]
rediscloud_subscription.rahul-test-terraform: Creation complete after 8m32s [id=1649277]

Apply complete! Resources: 3 added, 0 changed, 0 destroyed.

Step 7: Verify the database

You can now verify the new database created under Subscription named “db-json.”


Step 8: Cleanup

The Terraform destroy command is a convenient way to destroy all remote objects managed by a particular Terraform configuration. While you will typically not want to destroy long-lived objects in a production environment, Terraform is sometimes used to manage ephemeral infrastructure for development purposes, in which case you can use 'terraform destroy' to conveniently clean up all of those temporary objects once you are finished with your work.

% terraform destroy
random_password.passwords[0]: Refreshing state... [id=none]
random_password.passwords[1]: Refreshing state... [id=none]
rediscloud_subscription.rahul-test-terraform: Refreshing state... [id=1649277]

Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
- destroy

Terraform will perform the following actions:

# random_password.passwords[0] will be destroyed
- resource "random_password" "passwords" {
- id = "none" -> null
- length = 20 -> null
- lower = true -> null
- min_lower = 0 -> null
- min_numeric = 0 -> null
- min_special = 0 -> null
- min_upper = 0 -> null
- number = true -> null
- result = (sensitive value)
- special = false -> null
- upper = true -> null
}

# random_password.passwords[1] will be destroyed
- resource "random_password" "passwords" {
- id = "none" -> null
- length = 20 -> null
- lower = true -> null
- min_lower = 0 -> null
- min_numeric = 0 -> null
- min_special = 0 -> null
- min_upper = 0 -> null
- number = true -> null
- result = (sensitive value)
- special = false -> null
- upper = true -> null
}

# rediscloud_subscription.rahul-test-terraform will be destroyed
- resource "rediscloud_subscription" "rahul-test-terraform" {
- id = "1649277" -> null
- memory_storage = "ram" -> null
- name = "rahul-test-terraform" -> null
- payment_method_id = "XXXX" -> null
- persistent_storage_encryption = true -> null

- cloud_provider {
- cloud_account_id = "1" -> null
- provider = "AWS" -> null

- region {
- multiple_availability_zones = false -> null
- networking_deployment_cidr = "10.0.0.0/24" -> null
- networks = [
- {
- networking_deployment_cidr = "10.0.0.0/24"
- networking_subnet_id = "subnet-0055e8e3ee3ea796e"
- networking_vpc_id = ""
},
] -> null
- preferred_availability_zones = [
- "us-east-1a",
] -> null
- region = "us-east-1" -> null
}
}

- database {
# At least one attribute in this block is (or was) sensitive,
# so its contents will not be displayed.
}
}

Plan: 0 to add, 0 to change, 3 to destroy.

Do you really want to destroy all resources?
Terraform will destroy all your managed infrastructure, as shown above.
There is no undo. Only 'yes' will be accepted to confirm.

Enter a value: yes

rediscloud_subscription.rahul-test-terraform: Destroying... [id=1649277]

rediscloud_subscription.rahul-test-terraform: Destruction complete after 1m34s
random_password.passwords[0]: Destroying... [id=none]
random_password.passwords[1]: Destroying... [id=none]
random_password.passwords[0]: Destruction complete after 0s
random_password.passwords[1]: Destruction complete after 0s

Destroy complete! Resources: 3 destroyed.

Further References:


Create Redis database on Azure Cache

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis cache delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT. Developers love Redis due to its speed, simplicity and performance.

The Azure cloud platform has more than 200+ products and cloud services designed to help you bring new solutions to life-to solve today's challenges and create the future. Azure services help you to build, run, and manage applications across multiple clouds, on-premises, and at the edge, with the tools and frameworks of your choice.

Azure Cache for Redis is a native fully-managed service on Microsoft Azure. Azure Cache for Redis offers both the Redis open-source (OSS Redis) and a commercial product from Redis (Redis Enterprise) as a managed service. It provides secure and dedicated Redis server instances and full Redis API compatibility. The service is operated by Microsoft, hosted on Azure, and accessible to any application within or outside of Microsoft Azure.

The Azure Cache for Redis dashboard uses Azure Monitor to provide several options for monitoring your cache instances. Use Azure Monitor to:

  • View metrics
  • Pin metrics charts to the Startboard
  • Customize the date and time range of monitoring charts
  • Add and remove metrics from the charts
  • Set alerts when certain conditions are met

Step 1. Getting Started

Search for "azure redis cache " in the search dashboard and launch Azure Cache for Redis Enterprise

RedisLabs Azure Page

Step 2: Setup & Subscribe

RedisLabs Azure Page

Step 3: Configuring New Redis Cache Instance

RedisLabs Azure Page

Step 4: Connecting to Redis database

You can directly connect to the Redis cache instances using the Redis CLI command (redis-cli) as shown:

sudo redis-cli -h demos.redis.cache.windows.net -p 6379
demos.redis.cache.windows.net:6379>
tip

You can have multiple clients connected to a Redis database at the same time. The above Redis client command might require a password if you have set up authentication in your Redis configuration file. You can insert data into Redis using the SET command and then fetch it back with the GET command. You can also run the Redis INFO command to get statistics about the health of the Redis server (for example, memory usage and Redis server load).

Resources

Next Steps

Redis Launchpad

Getting Started with Azure Functions and Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

alt_text

Azure Functions is an event-based, serverless compute platform offered by Microsoft to accelerate and simplify serverless application development. It allows developers to write less code, build and debug locally without additional setup, and deploy and operate at scale in the cloud.

How it works

Azure Functions allows you to implement your system's logic into readily available blocks of code. These code blocks are called "functions." An Azure function's execution is triggered when an event is fired. Whenever demand for execution increases, more and more resources are allocated automatically to the service, and when requests fall, all extra resources and application instances drop off automatically. In short, as a developer, you can now focus on the pieces of code that matter most to you, and Azure Functions handles the rest.

Azure Functions provides as many or as few compute resources as needed to meet your application's demand. Providing compute resources on-demand is the essence of serverless computing in Azure Functions.

Benefits of Microsoft Azure Functions

  • Azure Functions provides automated and flexible scaling.
  • It allows you to build, debug, deploy, and monitor with integrated tools and built-in DevOps capabilities.
  • It supports a variety of programming languages such as C#, Java, JavaScript, Python, and PowerShell.
  • It allows you to use Functions extensions on Visual Studio and Visual Studio Code for faster and more efficient development on your local system.
  • With Azure Functions you can set up CI/CD with Azure Pipelines.
  • It’s a great solution for processing bulk data, integrating systems, working with IoT, and building simple APIs and microservices.
  • It’s used to break monolithic architectures into loosely coupled functions.
  • It allows you to deploy Functions to Kubernetes.

In this tutorial, you will learn how to get started with Azure Functions and Redis.

Getting started

  • Step 1. Log in to Microsoft Azure Portal
  • Step 2. Set up Azure Cache for Redis
  • Step 3. Configure Keys for Redis Cache
  • Step 4. Verify if Redis database is reachable remotely
  • Step 5. Install Homebrew on Mac
  • Step 6. Install Visual Studio Code
  • Step 7. Install the Azure Functions Core Tools
  • Step 8. Install the Azure Functions extension for Visual Studio Code
  • Step 9. Connect Azure Function to Azure account
  • Step 10. Clone the project repository
  • Step 11. Trigger the function
  • Step 12. Verify the Azure Functions app is working properly
  • Step 13. Seed the Redis database
  • Step 14. Run query using RedisInsight

Step 1. Log in to Microsoft Azure Portal

Create an Azure account with an active subscription by clicking this link: Create an account for free.

Acount Dashboard

Step 2. Set up “Azure Cache for Redis”

Type "Azure Cache for Redis" in the search section and select the service:

Azure Cache for Redis Service

Under "New Redis Cache" window, create a new resource group, select your preferred location and cache type:

Creating a new Redis Instance

Once you are done with the entries, click "Review + Create" button.

alt_text

Wait a few seconds for the deployment process to complete.

Deploying your Redis Instance

Once the deployment is complete, you will be provided with the deployment name, subscription details and resource group.

Redis Instance up and running

Step 3. Configure Keys for Redis Cache

You will need keys to log in to the Redis database. Click the "Overview" option in the left sidebar to see the Primary key, and save it for future reference.

Managing keys

Step 4. Verify if Redis database is accessible

redis-cli -h demorediss.redis.cache.windows.net -p 6379
demorediss.redis.cache.windows.net:6379> info modules
NOAUTH Authentication required.
demorediss.redis.cache.windows.net:6379> auth jsn9IdFXXXXXXXXXXXXXsAzCaDzLh6s=
OK
demorediss.redis.cache.windows.net:6379> get a1
"100"

Step 5. Install Homebrew on Mac

Install the Homebrew package manager by running this script:

/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"

Step 6. Install Visual Studio Code

Visual Studio Code is a lightweight but powerful source code editor that runs on your desktop and is available for Windows, macOS, and Linux. It comes with built-in support for JavaScript, TypeScript, and Node.js, and has a rich ecosystem of extensions for other languages (such as C++, C#, Java, Python, PHP, Go) and runtimes (such as .NET and Unity). Begin your journey with VS Code with these introductory videos.

Visual Studio Code download

Step 7. Install the Azure Functions Core Tools

brew tap azure/functions
brew install azure-functions-core-tools@4
# if upgrading on a machine that has 2.x or 3.x installed:
brew link --overwrite azure-functions-core-tools@4

Step 8. Install the Azure Functions extension for Visual Studio Code

Use the Azure Functions extension to quickly create, debug, manage, and deploy serverless apps directly from VS Code.

The Azure Functions extension for Visual Studio Code

Step 9. Connect Azure Function to Azure account

Configuring Azure in Visual Studio Code

Step 10. Clone the project repository

For this tutorial, we will be using a baby names counter app built using C#. To get started, we will first clone the repository:

git clone https://github.com/redis-developer/Baby-Names-Func


Add the “Azure Cache for Redis” endpoint details in the local.settings.json file as shown below:

{
  "IsEncrypted": false,
  "Values": {
    "FUNCTIONS_WORKER_RUNTIME": "dotnet",
    "redisCacheConnectionString": "demorediss.redis.cache.windows.net"
  }
}
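For illustration only, here is a minimal, hypothetical sketch of how the function app might consume this setting with the StackExchange.Redis client. The setting name comes from the file above, while the access-key handling and class shape are assumptions, since the actual connection code lives in the cloned repository:

// Hypothetical sketch; the real connection logic lives in the Baby-Names-Func repository.
using System;
using StackExchange.Redis;

public static class RedisConnection
{
    // "redisCacheConnectionString" is the setting defined in local.settings.json;
    // locally, Azure Functions exposes entries under "Values" as environment variables.
    private static readonly Lazy<ConnectionMultiplexer> LazyConnection =
        new Lazy<ConnectionMultiplexer>(() =>
        {
            var host = Environment.GetEnvironmentVariable("redisCacheConnectionString");
            // <access-key> stands in for the Primary key saved in Step 3.
            return ConnectionMultiplexer.Connect($"{host}:6379,password=<access-key>");
        });

    public static ConnectionMultiplexer Connection => LazyConnection.Value;
}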

Open the project with Visual Studio Code by running the following command:

cd Baby-Names-Func
code .

This will open VS Code. The function will automatically load into the plugin.

Project loaded into Visual Studio Code

Step 11. Trigger the function

Press F5 to automatically execute the function.

Executing the function

If you want to select the repository manually, choose the .NET framework, etc., then click “Create new project.”

Creating a new project

You should see output similar to the following in the VS Code terminal:

     1>Done Building Project "/Users/ajeetraina/projects/Baby-Names-Func/RedisFunctions.csproj" (Clean target(s)).

Terminal will be reused by tasks, press any key to close it.

> Executing task: dotnet build /property:GenerateFullPaths=true /consoleloggerparameters:NoSummary <

Microsoft (R) Build Engine version 17.0.0+c9eb9dd64 for .NET
Copyright (C) Microsoft Corporation. All rights reserved.

Determining projects to restore...
All projects are up-to-date for restore.
RedisFunctions -> /Users/ajeetraina/projects/Baby-Names-Func/bin/Debug/net6.0/RedisFunctions.dll

Terminal will be reused by tasks, press any key to close it.

> Executing task: func host start <


Azure Functions Core Tools
Core Tools Version: 4.0.3971 Commit hash: d0775d487c93ebd49e9c1166d5c3c01f3c76eaaf (64-bit)
Function Runtime Version: 4.0.1.16815

[2022-03-01T07:51:01.383Z] Found /Users/ajeetraina/projects/Baby-Names-Func/RedisFunctions.csproj. Using for user secrets file configuration.

Functions:

CountBabyNames: [GET,POST] http://localhost:7071/api/getCount

IncrementBabyName: [GET,POST] http://localhost:7071/api/increment

For detailed output, run func with --verbose flag.


Step 12. Verify the Azure Functions app is working properly

Output in the browser from running the application locally
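If you prefer the command line, you can also hit the endpoints printed by func host start; any query parameters the functions expect are defined in the repository code:

curl http://localhost:7071/api/getCount
curl http://localhost:7071/api/increment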

Step 13. Seed the Redis database

Now, let us seed the baby names data into the Redis database.

git clone https://github.com/slorello89/Seed-Baby-Names

If you connect to the Redis database and run the MONITOR command, you should see the data being inserted into the database as shown below:

1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Rowen" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Titus" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Braxton" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Alexander" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Finnegan" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Nasir" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Fabian" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Alexander" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Emilio" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Dax" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Johnny" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Mario" "1"
1646061655.966050 [0 122.171.48.244:60531] "SELECT" "0"
1646061655.966050 [0 122.171.48.244:60531] "CMS.INCRBY" "baby-names" "Lennox" "1"

Step 14. Run query using RedisInsight

Set up RedisInsight on your local system and get connected to the Redis database. Once connected, you should be able to run the following queries:

Redis Insight

> CMS.INFO baby-names
1) width
2) (integer) 1000
3) depth
4) (integer) 10
5) count
6) (integer) 100000

> CMS.QUERY baby-names Johnny
1) 109
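As a quick sanity check, you can bump a counter yourself with the standard Count-Min Sketch commands and query it again; the counts below are illustrative and depend on your data:

> CMS.INCRBY baby-names Johnny 1
1) (integer) 110

> CMS.QUERY baby-names Johnny
1) 110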

Additional references:


Create Database using AWS Cloud

Redis Enterprise Cloud on AWS is fully managed Redis Enterprise as a service. Designed for modern distributed applications, Redis Enterprise Cloud on AWS is known for its high performance, infinite scalability, and true high availability.

Follow the steps below to set up Redis Enterprise Cloud hosted on AWS:

Step 1. Getting Started

Follow this link to register.

AWS Cloud

Step 2. Choose AWS Cloud

For the cloud provider, select Amazon AWS and choose the Free plan.

AWS Cloud

Step 3. Create database

AWS Cloud

Step 4. Click "Activate"

AWS Cloud


Create Database using Azure Cache for Redis

Azure Cache for Redis is a native fully-managed service on Microsoft Azure. Azure Cache for Redis offers both the Redis open-source (OSS Redis) and a commercial product from Redis (Redis Enterprise) as a managed service. It provides secure and dedicated Redis server instances and full Redis API compatibility. The service is operated by Microsoft, hosted on Azure, and accessible to any application within or outside of Azure.

Step 1. Getting Started

Launch Azure Cache for Redis Enterprise & Flash

RedisLabs Azure Page

Step 2. Setup & Subscribe

RedisLabs Azure Page

Step 3. Configuring New Redis Cache

RedisLabs Azure Page

Step 4. Finalising the setup

RedisLabs Azure Page

Step 5. Connecting to Redis database

sudo redis-cli -h redislabs.redis.cache.windows.net -p 6379
redislabs.redis.cache.windows.net:6379>
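As with the earlier example, you will typically need to authenticate with an access key before running commands (the key below is a placeholder):

redislabs.redis.cache.windows.net:6379> AUTH <primary-access-key>
OK
redislabs.redis.cache.windows.net:6379> PING
PONG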

Next Steps


Create Database using Google Cloud

Redis Enterprise Cloud delivers fully managed Redis Enterprise as a Service. It offers all the capabilities of Redis Enterprise while taking care of all the operational aspects associated with operating Redis in the most efficient manner on Google Cloud Platform. Redis Enterprise Cloud is built on a complete serverless concept, so users don’t need to deal with nodes and clusters.

Step 1. Getting Started

Launch Redis Enterprise Cloud page on Google Cloud Platform

Google Cloud

Step 2. Click "Manage via Redis Labs"

Google Cloud

Step 3. Create Subscription

Google Cloud

Step 4. Specify the database name

Google Cloud

Step 5. Enter sizing details

Google Cloud

Step 6. Review & Create

Google Cloud

Step 7. Verify the details

Google Cloud

Step 8. Finalising the setup

Google Cloud

Next Steps


Create Database using Redis Enterprise Cloud

Redis Enterprise Cloud is a fully managed cloud service by Redis. Built for modern distributed applications, Redis Enterprise Cloud enables you to run any query, simple or complex, at sub-millisecond performance at virtually infinite scale without worrying about operational complexity or service availability. With modern probabilistic data structures and extensible data models, including Search, JSON, Graph, and Time Series, you can rely on Redis as your data platform for all your real-time needs.

Step 1. Create free cloud account

Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

My Image

image

Step 2. Create Your subscription

Next, you will have to create a Redis Enterprise Cloud subscription. In the Redis Enterprise Cloud menu, click "Create your Subscription".

My Image

Step 3. Select the right Subscription Plan

Select "Fixed Plan" for low throughout application as for now.

My Image

Step 4. Select cloud vendor

For the cloud provider, select your preferred cloud (for demo purposes).

My Image

Step 5. Click "Create Subscription"

Finally, click the "Create Subscription" button.

My Image

You can now verify the subscription as shown below:

My Image

Step 6. Create database

Click "Create Database". Enter database name and your preferred module.

My Image

Step 7. Launch database

Click "Activate" and wait for few seconds till it gets activated. Once fully activated, you will see the database endpoints as shown below:

My Image
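With the endpoint details in hand, you can verify connectivity from a terminal; the values below are placeholders for the endpoint, port, and password shown on your dashboard:

redis-cli -h <endpoint> -p <port> -a <password> PING
PONG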

Redis Launchpad

How to build and run a Node.js application using Nginx, Docker and Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Thanks to Node.js, millions of frontend developers that write JavaScript for the browser are now able to write server-side code in addition to client-side code without needing to learn a completely different language. Node.js is a free, open-source, cross-platform JavaScript runtime environment. It can handle thousands of concurrent connections on a single server without the burden of managing thread concurrency, which can be a significant source of bugs.

Nginx-node

In this quickstart guide, you will see how to build a Node.js application (visitor counter) using Nginx, Redis and Docker.

What do you need?

  • Node.js: An open-source, cross-platform, back-end JavaScript runtime environment that runs on the V8 engine and executes JavaScript code outside a web browser.
  • Nginx: An open source software for web serving, reverse proxying, caching, load balancing, media streaming, and more.
  • Docker: a containerization platform for developing, shipping, and running applications.
  • Docker Compose: A tool for defining and running multi-container Docker applications.

Project structure

.
├── docker-compose.yml
├── redis
├── nginx
│   ├── Dockerfile
│   └── nginx.conf
├── web1
│   ├── Dockerfile
│   ├── package.json
│   └── server.js
└── web2
    ├── Dockerfile
    ├── package.json
    └── server.js

Prerequisite:

– Install Docker Desktop

Visit https://docs.docker.com/desktop/mac/install/ to set up Docker Desktop for Mac or Windows on your local system.

Image1

info

Docker Desktop comes with Docker compose installed by default, hence you don't need to install it separately.

Step 1. Create a Docker compose file

Create a file named "docker-compose.yml" with the below content:

version: '3.9'
services:
  redis:
    image: 'redis:alpine'
    ports:
      - '6379:6379'
  web1:
    restart: on-failure
    build: ./web1
    ports:
      - '81:5000'
  web2:
    restart: on-failure
    build: ./web2
    ports:
      - '82:5000'
  nginx:
    build: ./nginx
    ports:
      - '80:80'
    depends_on:
      - web1
      - web2

The compose file defines an application with four services: redis, web1, web2, and nginx. When deploying the application, docker-compose maps port 80 of the nginx service container to port 80 of the host, as specified in the file.

info

By default, Redis runs on port 6379. Make sure no other instance of Redis is running on your system, and that port 6379 on the host is not already used by another container; otherwise, change the mapped port.
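Before bringing the stack up, you can optionally sanity-check the file: docker-compose config validates the YAML and prints the resolved configuration.

docker-compose config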

Step 2. Create an nginx directory and add the below files:

File: nginx/nginx.conf

upstream loadbalancer {
  server web1:5000;
  server web2:5000;
}

server {
  listen 80;
  server_name localhost;
  location / {
    proxy_pass http://loadbalancer;
  }
}

File: Dockerfile

FROM nginx
RUN rm /etc/nginx/conf.d/default.conf
COPY nginx.conf /etc/nginx/conf.d/default.conf

Step 3. Create a web directory and add the below files:

File: web/Dockerfile

FROM node:alpine

WORKDIR /usr/src/app

COPY ./package.json ./
RUN npm install
COPY ./server.js ./

CMD ["npm","start"]

File: web/package.json


"name": "web",
"version": "1.0.0",
"description": "Running Node.js and Express.js on Docker",
"main": "server.js",
"scripts": {
"start": "node server.js"
},
"dependencies": {
"express": "^4.17.2",
"redis": "3.1.2"
},
"author": "",
"license": "MIT"
}

File: web/server.js

const express = require('express');
const redis = require('redis');
const app = express();

// Connect to the "redis" service defined in docker-compose.yml
const redisClient = redis.createClient({
  host: 'redis',
  port: 6379,
});

app.get('/', function (req, res) {
  redisClient.get('numVisits', function (err, numVisits) {
    // numVisits is null on the very first visit
    let numVisitsToDisplay = parseInt(numVisits) + 1;
    if (isNaN(numVisitsToDisplay)) {
      numVisitsToDisplay = 1;
    }
    res.send('Number of visits is: ' + numVisitsToDisplay);
    redisClient.set('numVisits', numVisitsToDisplay);
  });
});

app.listen(5000, function () {
  console.log('Web application is listening on port 5000');
});

Step 4. Create a web1 directory and add the below files:

File: Dockerfile

FROM node:alpine

WORKDIR /usr/src/app

COPY ./package*.json ./
RUN npm install
COPY ./server.js ./

CMD ["npm","start"]

File: server.js

const express = require('express');
const redis = require('redis');
const app = express();

// Connect to the "redis" service defined in docker-compose.yml
const redisClient = redis.createClient({
  host: 'redis',
  port: 6379,
});

app.get('/', function (req, res) {
  redisClient.get('numVisits', function (err, numVisits) {
    // numVisits is null on the very first visit
    let numVisitsToDisplay = parseInt(numVisits) + 1;
    if (isNaN(numVisitsToDisplay)) {
      numVisitsToDisplay = 1;
    }
    res.send('web1: Total number of visits is: ' + numVisitsToDisplay);
    redisClient.set('numVisits', numVisitsToDisplay);
  });
});

app.listen(5000, function () {
  console.log('Web app is listening on port 5000');
});

File: package.json

{
  "name": "web1",
  "version": "1.0.0",
  "description": "Running Node.js and Express.js on Docker",
  "main": "server.js",
  "scripts": {
    "start": "node server.js"
  },
  "dependencies": {
    "express": "^4.17.2",
    "redis": "3.1.2"
  },
  "author": "",
  "license": "MIT"
}

The web2 directory mirrors web1; only the package name ("web2") and the response prefix ("web2: ...") differ, as the expected output below shows.

Step 5. Deploy the application

Let us deploy the full-fledged app using docker-compose:

$ docker-compose up -d
Creating nginx-nodejs-redis_redis_1 ... done
Creating nginx-nodejs-redis_web1_1 ... done
Creating nginx-nodejs-redis_web2_1 ... done
Creating nginx-nodejs-redis_nginx_1 ... done

Expected result

Listing the containers must show four containers running, with port mappings as below:

$ docker-compose ps
            Name                        Command                 State           Ports
------------------------------------------------------------------------------------------
nginx-nodejs-redis_nginx_1   /docker-entrypoint.sh ngin ...   Up      0.0.0.0:80->80/tcp
nginx-nodejs-redis_redis_1   docker-entrypoint.sh redis ...   Up      0.0.0.0:6379->6379/tcp
nginx-nodejs-redis_web1_1    docker-entrypoint.sh npm start   Up      0.0.0.0:81->5000/tcp
nginx-nodejs-redis_web2_1    docker-entrypoint.sh npm start   Up      0.0.0.0:82->5000/tcp

Step 6. Testing the app

After the application starts, navigate to http://localhost:80 in your web browser or run:

$ curl localhost:80
web1: Total number of visits is: 1
$ curl localhost:80
web1: Total number of visits is: 2
$ curl localhost:80
web2: Total number of visits is: 3
$ curl localhost:80
web2: Total number of visits is: 4

Step 7. Monitoring Redis keys

If you want to monitor the Redis keys, you can use the MONITOR command. Install the Redis client on your Mac system using brew install redis, then connect directly to the Redis container (whose port 6379 is mapped to the host) by issuing the command below:

% redis-cli
127.0.0.1:6379> monitor
OK
1646485507.290868 [0 172.24.0.2:34330] "get" "numVisits"
1646485507.309070 [0 172.24.0.2:34330] "set" "numVisits" "5"
1646485509.228084 [0 172.24.0.2:34330] "get" "numVisits"
1646485509.241762 [0 172.24.0.2:34330] "set" "numVisits" "6"
1646485509.619369 [0 172.24.0.4:52082] "get" "numVisits"
1646485509.629739 [0 172.24.0.4:52082] "set" "numVisits" "7"
1646485509.990926 [0 172.24.0.2:34330] "get" "numVisits"
1646485509.999947 [0 172.24.0.2:34330] "set" "numVisits" "8"
1646485510.270934 [0 172.24.0.4:52082] "get" "numVisits"
1646485510.286785 [0 172.24.0.4:52082] "set" "numVisits" "9"
1646485510.469613 [0 172.24.0.2:34330] "get" "numVisits"
1646485510.480849 [0 172.24.0.2:34330] "set" "numVisits" "10"
1646485510.622615 [0 172.24.0.4:52082] "get" "numVisits"
1646485510.632720 [0 172.24.0.4:52082] "set" "numVisits" "11"

Further References


Overview

The following links provide you with the available options to run apps on Heroku using Redis:

Create a Redis database on Heroku

Create a Redis database on Heroku


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Heroku is a cloud Platform as a Service (PaaS) supporting multiple programming languages that is used as a web application deployment model. Heroku lets developers build, run, and scale applications in a similar manner across all of its supported languages (Java, Node.js, Scala, Clojure, Python, PHP, Ruby, and Go).

Using Redis Cloud directly

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis cache delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT.

Redis Cloud is a fully-managed cloud service for hosting and running your Redis dataset in a highly-available and scalable manner, with predictable and stable top performance. Redis Enterprise Cloud allows you to run a Redis server in the cloud and access the instance in multiple ways: RedisInsight, the Redis command line, and client tools. You can quickly and easily get your apps up and running with Redis Cloud through its Heroku add-on: just tell it how much memory you need and get started instantly with your first Redis database. You can then add more Redis databases (each running in a dedicated process, in a non-blocking manner) and increase or decrease the memory size of your plan without affecting your existing data.

tip

Heroku add-ons are a set of tools and services for developing, extending, and operating your app.

To get your apps up and running with Redis Cloud directly, follow the steps below:

Step 1. Create Redis Cloud

Create your free Redis Cloud account by visiting this link

recloud

Follow this link to create a Redis Cloud subscription and database. Once you create the database, you will be provisioned with a unique database endpoint URL, port and password. Save these for future reference.

Before you proceed with Heroku, ensure that you can connect to the Redis instance and verify that it is accessible via the redis-cli command. You can run the INFO command to see the version, memory usage, stats, and modules enabled in the Redis Cloud database.
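For example (the placeholders stand in for the endpoint, port, and password of your own database):

redis-cli -h <endpoint> -p <port> -a <password> INFO server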

Step 2. Create a Heroku account

If you are using Heroku for the first time, create your new Heroku account through this link.

heroku

Step 3. Install Heroku CLI on your system

 brew install heroku

Step 4. Login to Heroku

 heroku login
heroku: Press any key to open up the browser to login or q to exit:
Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
Logging in... done
Logged in as your_email_address

Step 5. Connect your application to Redis Cloud

For this demonstration, we will be using a Sample Rate Limiting application.

Clone the repository

 git clone https://github.com/redis-developer/basic-rate-limiting-demo-python

The repository you just cloned is a functioning Git repository containing a simple application and a package.json file. Next, create a Heroku app, which also adds a heroku Git remote:

 heroku create
Creating app... done, ⬢ lit-bayou-75670
https://lit-bayou-75670.herokuapp.com/ | https://git.heroku.com/lit-bayou-75670.git

heroku

Step 6. Setting up environment variables

Go to the Heroku dashboard, click "Settings", and set REDIS_URL and REDIS_PASSWORD under the Config Vars, using the database endpoint details from Step 1, as shown below.

note

The Redis URL endpoint is unique and will be different in your case; enter your own values accordingly.

Refer to Step 1 for the correct values to use.

heroku
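Alternatively, you can set the same config vars from the Heroku CLI; the values below are placeholders for your own endpoint details:

heroku config:set REDIS_URL=<your-database-endpoint> REDIS_PASSWORD=<your-database-password>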

Step 7. Pushing the code to Git

 git push heroku
remote: -----> Build succeeded!
remote: -----> Discovering process types
remote: Procfile declares types -> web
remote:
remote: -----> Compressing...
remote: Done: 32.9M
remote: -----> Launching...
remote: Released v5
remote: https://lit-bayou-75670.herokuapp.com/ deployed to Heroku
remote:
remote: Verifying deploy... done.
To https://git.heroku.com/lit-bayou-75670.git
* [new branch] main -> main

Check the logs:

 heroku logs --tail
2021-03-27T03:48:30.000000+00:00 app[api]: Build succeeded
2021-03-27T03:48:33.956884+00:00 heroku[web.1]: Starting process with command `node server/index.js`
2021-03-27T03:48:36.196827+00:00 app[web.1]: App listening on port 11893

Step 8. Accessing the app

heroku

Redis Launchpad

Create a Redis Database - Quick Starts

The following quick starts show various ways to get started and create a new Redis database:

Getting started with Redis Functions
Create Redis database on Heroku
Create Redis database on Azure Cache
Create Redis database on AWS
Create Redis database on Docker
Create Redis database on Kubernetes Platform

How to Deploy a Redis Enterprise Database from a Jenkins Pipeline


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis
Profile picture for Matthew Royal
Author:
Matthew Royal, Consulting Engineer at Redis

Jenkins is currently the most popular CI (Continuous Integration) tool, with roughly 15 million users. It is an open source automation server which enables developers to reliably build, test, and deploy their software. It was forked in 2011 from a project called Hudson after a dispute with Oracle, and is used for Continuous Integration and Continuous Delivery (CI/CD) and test automation. Jenkins is based on Java and provides over 1,700 plugins to automate your developer workflow and save you time on repetitive tasks.

image

Source: Datanyze market analysis

Jenkins Pipeline performs Continuous Delivery tasks declared in a Jenkinsfile stored alongside code. The Pipeline plugin has a fairly comprehensive tutorial checked into its source tree. Plugins are the primary means of enhancing the functionality of a Jenkins environment to suit organization- or user-specific needs. Using a Pipeline, you can configure Jenkins to automatically deploy key pieces of infrastructure, such as a Redis database.

Architecture

Jenkins Pipelines are the Continuous Delivery (CD) side of Jenkins. They use a Jenkinsfile declarative script to define the behavior of the pipeline. You can script actions in Groovy and run shell scripts from it, so you can make it do pretty much anything.

The Jenkinsfile instructs Jenkins to export some environment variables from the Credentials store in order to connect to the Redis server, then executes the Python pipeline script with the Deployment Configuration file given as a parameter. An example deployment-configuration-file.json looks like:

{
  "database": {
    "name": "made-with-jenkins",
    "port": 12345,
    "size": "S",
    "operation": "CREATE"
  }
}

The Python script uses predefined JSON template files that create Redis databases of fixed t-shirt sizes (S, M, L, XL). The Deployment Config file tells the Python script what the desired database name, port, and size are. A sample template file looks like:

{
  "name": "{NAME}",
  "type": "redis",
  "memory_size": 343597383
}

The following is an architectural diagram of how a Jenkins pipeline adds a database to a Redis cluster.

alt_text

Process

  1. The Jenkins pipeline clones a remote git repository, containing the application code and the pipeline code.
  2. The Redis host, port, user, and password are decrypted from the credentials store and are exported as Environment variables.
  3. Jenkins runs the Python pipeline script, specifying the deployment configuration file in the git repo.
  4. The Python script uses the deployment configuration file to choose and customize a pre-populated template to use as the body of the REST create database request to Redis.

List of Pipeline Code Files

Configuring Jenkins

Installing Jenkins

You can use Docker Desktop to quickly get a Jenkins instance up and running, exposing ports 8080 (web GUI) and 50000 (inbound agents).

docker run --name jenk -p 8080:8080 -p 50000:50000 jenkins/jenkins:lts-jdk11

The installation generates a first-run admin password, printed in the container output.

Then open the Jenkins URL http://localhost:8080/ and enter the password to unlock your instance and begin installation.

alt_text

Choose "Install suggested plugins" to perform the Jenkins configuration.

alt_text

Wait for the plugins to complete the installation process.

alt_text

Next, you’re prompted to create your admin user.

alt_text

Congratulations! Jenkins is ready!

alt_text

Installing Python and custom libraries

If you use an existing instance of Jenkins server, you can install Python and the custom libraries from the command line interface of that machine.

Docker instances of Jenkins can be accessed by shell using the following command:

docker exec -it -u root jenk bash

The Python pipeline script requires Python 3 and the libraries click and requests. Install them inside the container:

apt-get update
apt-get install -y python3-pip

pip install --upgrade pip
pip install click
pip install requests

Alternatively, if you are creating a new Jenkins from scratch, you can include these dependencies in a separate Dockerfile that builds off the base Jenkins image:

FROM jenkins/jenkins:lts-jdk11
USER root
RUN apt-get update
RUN apt-get install -y python3-pip

# Install app dependencies
RUN pip3 install --upgrade pip
RUN pip3 install click
RUN pip3 install requests

Add credentials to Secret Store

Using the left-side menu, select Manage Jenkins, then select Manage Credentials, then click the link (global).

alt_text

alt_text

alt_text

From here, you can specify Kind: Secret text for the 4 secrets required to connect with the Redis REST endpoint:

  • REDIS_SERVER_FQDN
    • Set to the 'https://server-address' of the target Redis instance.
  • REDIS_SERVER_PORT
    • Set to the Redis REST API port (default 9443).
  • REDIS_USER
    • Set to the Redis admin user allowed to create databases.
  • REDIS_PASS
    • Set to the Redis admin user's password.

alt_text

If you are using a private code repository, you may also wish to include a Personal Access Token here.

Create the Jenkins pipeline

From the dashboard, click New Item.

alt_text

Enter in a name for the pipeline, and choose the Pipeline type.

alt_text

Connect GitHub repository

From the Pipeline configuration page that appears, check the GitHub box and enter the git clone URL, complete with any credentials needed to read the repository. For GitHub access, the password should be a Personal Access Token rather than the actual user password.

alt_text

Redis pipeline Jenkinsfile

Scrolling down on this page to the Advanced Project Options, you can either paste in the Jenkinsfile or specify its filename if the file exists in the git repository.

alt_text

Here is an example Jenkinsfile containing the mapping of Credentials to the environment variables, and 2 separate stages – a Hello World which always succeeds, and a build stage that invokes the Python script. Paste this into the pipeline script section.

pipeline {
    agent any

    environment {
        REDIS_SERVER_FQDN = credentials('REDIS_SERVER_FQDN')
        REDIS_SERVER_PORT = credentials('REDIS_SERVER_PORT')
        REDIS_USER = credentials('REDIS_USER')
        REDIS_PASS = credentials('REDIS_PASS')
    }

    stages {
        stage('Hello') {
            steps {
                echo 'Hello World'
            }
        }

        stage('build') {
            steps {
                git branch: 'main', url: 'https://github.com/masyukun/redis-jenkins-pipeline.git'
                sh 'python3 jenkins-re-pipeline.py --deployfile deployment-configuration-file.json'
            }
        }
    }
}

Click "Save" when the job spec is complete.

Run the Jenkins pipeline

Click on the pipeline you created:

alt_text

Click the "Build Now" icon on the left side menu.

alt_text

Click the Status icon on the left side menu in order to see the results of all the output from each of the stages of your pipeline.

alt_text

Hover over the build stage and click the Logs button of the most recent build in order to see the Python script’s output.

alt_text

Sample output: you should see a verbose response from Redis’s REST service in the “Shell Script” accordion pane.

There’s also a “Git” output log, in case you need to debug something at that level. Any time you update the branch in the remote git repository, you should see evidence in that log that the latest changes have successfully checked out into the local Jenkins git repository.

alt_text

Open your Redis Enterprise Secure Management UI at https://servername:8443 and click on the databases menu item to verify that your database was created with the name, port, and size specified in the deployment-configuration-file.json file.

alt_text

Congratulations! You have deployed a Redis Enterprise database using a Jenkins Pipeline!

The GitHub repository is currently: https://github.com/masyukun/redis-jenkins-pipeline

Further Reading


Kubernetes Operator: What It Is and Why You Should Really Care About It


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

My Image

Kubernetes is popular due to its capability to deploy new apps at a faster pace. Thanks to "Infrastructure as data" (specifically, YAML), today you can express all your Kubernetes resources such as Pods, Deployments, Services, Volumes, etc., in a YAML file. These default objects make it much easier for DevOps and SRE engineers to fully express their workloads without the need to learn how to write code in a programming language like Python, Java, or Ruby.

Kubernetes is designed for automation. Out of the box, you get lots of built-in automation from the core of Kubernetes. It can speed up your development process by making easy, automated deployments, updates (rolling update), and by managing your apps and services with almost zero downtime. However, Kubernetes can’t automate the process natively for stateful applications. For example, say you have a stateful workload, such as a database application, running on several nodes. If a majority of nodes go down, you’ll need to reload the database from a specific snapshot following specific steps. Using existing default objects, types, and controllers in Kubernetes, this would be impossible to achieve.

Think of scaling nodes up, or upgrading to a new version, or disaster recovery for your stateful application — these kinds of operations often need very specific steps, and typically require manual intervention. Kubernetes cannot know all about every stateful, complex, clustered application. Kubernetes, on its own, does not know the configuration values for, say, a Redis cluster, with its arranged memberships and stateful, persistent storage. Additionally, scaling stateful applications in Kubernetes is not an easy task and requires manual intervention.

Stateful vs Stateless Applications

Let’s try to understand the difference between stateful versus stateless applications with a simple example. Consider a Kubernetes cluster running a simple web application (without any operator). The YAML file below allows you to create two replicas of NGINX (a stateless application).

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: web
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80

In the example above, a Deployment object named nginx-deployment (the .metadata.name field) is created in the "web" namespace. It creates two replicated Pods, indicated by the .spec.replicas field. The .spec.selector field defines how the Deployment finds which Pods to manage; in this case, you select a label that is defined in the Pod template (app: nginx). The template field contains the following subfields: the Pods are labeled app: nginx using the .metadata.labels field, and the Pod template's specification indicates that the Pods run one container, nginx, which runs the nginx Docker Hub image at version 1.14.2.

Run the command below to create the Deployment resource:

kubectl create -f nginx-dep.yaml

Let us verify if the Deployment was created successfully by running the following command:

 kubectl get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 2/2 2 2 63s

The example above shows the name of the Deployment in the namespace. It also displays how many replicas of the application are available to your users, and shows that the number of replicas updated to achieve the desired state is 2.

alt_text

You can run the kubectl describe command to get detailed information of deployment resources. To show details of a specific resource or group of resources:

 kubectl describe deploy
Name: nginx-deployment
Namespace: default
CreationTimestamp: Mon, 30 Dec 2019 07:10:33 +0000
Labels: <none>
Annotations: deployment.kubernetes.io/revision: 1
Selector: app=nginx
Replicas: 2 desired | 2 updated | 2 total | 0 available | 2 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: app=nginx
Containers:
nginx:
Image: nginx:1.7.9
Port: 80/TCP
Host Port: 0/TCP
Environment: <none>
Mounts: <none>
Volumes: <none>
Conditions:
Type Status Reason
---- ------ ------
Available False MinimumReplicasUnavailable
Progressing True ReplicaSetUpdated
OldReplicaSets: <none>
NewReplicaSet: nginx-deployment-6dd86d77d (2/2 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 90s deployment-controller Scaled up replica set nginx-deployment-6dd86d77d to 2

A Deployment is responsible for keeping a set of Pods running, but it’s equally important to expose an interface to these Pods so that the other external processes can access them. That’s where the Service resource comes in. The Service resource lets you expose an application running in Pods to be reachable from outside your cluster. Let us create a Service resource definition as shown below:

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
  type: LoadBalancer

The above YAML specification creates a new Service object named "nginx-service," which targets TCP port 80 on any Pod with the app=nginx label.
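Create the Service the same way you created the Deployment; the file name here is assumed:

kubectl create -f nginx-service.yaml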

 kubectl get svc -n web
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-service LoadBalancer 10.107.174.108 localhost 80:31596/TCP 46s

alt_text

Let’s scale the Deployment to 4 replicas. We are going to use the kubectl scale command, followed by the deployment type, name, and desired number of instances. The output is similar to this:

kubectl scale deployments/nginx-deployment --replicas=4
deployment.extensions/nginx-deployment scaled

The change was applied, and we have 4 instances of the application available. Next, let’s check if the number of Pods changed. There should now be 4 Pods running in the cluster (as shown in the diagram below)

alt_text

 kubectl get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 4/4 4 4 4m

There are 4 Pods, with different IP addresses. The change was registered in the Deployment events log.

 kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deployment-6dd86d77d-b4v7k 1/1 Running 0 4m32s 10.1.0.237 docker-desktop none none
nginx-deployment-6dd86d77d-bnc5m 1/1 Running 0 4m32s 10.1.0.236 docker-desktop none none
nginx-deployment-6dd86d77d-bs6jr 1/1 Running 0 86s 10.1.0.239 docker-desktop none none
nginx-deployment-6dd86d77d-wbdzv 1/1 Running 0 86s 10.1.0.238 docker-desktop none none

Deleting one of the web server Pods triggers work in the control plane to restore the desired state of four replicas. Kubernetes starts a new Pod to replace the deleted one. In this excerpt, the replacement Pod shows a STATUS of ContainerCreating:

 kubectl delete pod nginx-deployment-6dd86d77d-b4v7k

You will notice that the Nginx static web server is interchangeable with any other replica, or with a new Pod that replaces one of the replicas. It doesn’t store data or maintain state in any way. Kubernetes doesn’t need to make any special arrangements to replace a failed Pod, or to scale the application by adding or removing replicas of the server. Now you might be thinking, what if you want to store the state of the application? Great question.

Scaling stateful applications is hard

Scaling stateless applications in Kubernetes is easy, but it's not the same for stateful applications. Stateful applications require manual intervention. Bringing Pods up and down is not that simple: each Pod has an identity and data attached to it, and removing a Pod means losing its data and disrupting the system.

alt_text

Consider a Kubernetes cluster with 6 worker Nodes hosting an Nginx web application connected to a persistent volume, as shown above. Here is a snippet of the StatefulSet YAML file:


apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi

Kubernetes makes physical storage devices available to your cluster in the form of objects called Persistent Volumes. Each of these Persistent Volumes is consumed by a Kubernetes Pod by issuing a PersistentVolumeClaim object, also known as PVC. A PVC object lets Pods use storage from Persistent Volumes. Imagine a scenario in which we want to downscale a cluster from 5 Nodes to 3 Nodes. Suddenly removing 2 Nodes at once is a potentially destructive operation. This might lead to the loss of all copies of the data. A better way to handle Node removal would be to first migrate data from the Node to be removed to other Nodes in the system before performing the actual Pod deletion. It is important to note that the StatefulSet controller is necessarily generic and cannot possibly know about every possible way to manage data migration and replication. In practice, however, StatefulSets are rarely enough to handle complex, distributed stateful workload systems in production environments.

Now the question is, how to solve this problem? Enter Operators. Operators were developed to handle the sophisticated, stateful applications that the default Kubernetes controllers aren’t able to handle. While Kubernetes controllers like StatefulSets are ideal for deploying, maintaining, and scaling simple stateless applications, they are not equipped to handle access to stateful resources, or to upgrade, resize, and backup of more elaborate clustered applications such as databases. A Kubernetes Operator fills in the gaps between the capabilities and automation provided by Kubernetes and how your software uses Kubernetes for automation of tasks relevant to your software.

An Operator is basically an application-specific controller that can help you manage a Kubernetes application. It is a way to package, run, and maintain a Kubernetes application. It is designed to extend the capabilities of Kubernetes, and also simplify application management. This is especially useful for stateful applications, which include persistent storage and other elements external to the application, and may require extra work to manage and maintain.

tip

The Operator Framework is an open source project that provides developer and runtime Kubernetes tools, enabling you to accelerate the development of an operator. Learn more about operator framework here

Functions of Kubernetes Operators

A Kubernetes Operator uses the Kubernetes API server to create, configure, and manage instances of complex stateful applications on behalf of a Kubernetes user. There is a public repository called OperatorHub.io that is designed to be the public registry for finding Kubernetes Operator backend services. With Operator Hub, developers can easily create an application based on an operator without going through the complexity of crafting an operator from scratch.

alt_text

Below are a few examples of popular Kubernetes Operators and their functions and capabilities.

Kubernetes Operators:

  • Helps you deploy an application on demand (for example, the Argo CD operator; Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes that helps with easy installation and configuration on demand)
  • Helps you install applications with the required configurations and number of application instances
  • Allows you to take and restore backups of the application state (for example, Velero operator manages disaster recovery, backup, and restoration of cluster components such as pv, pvc, deployments, etc., to aid in disaster recovery)
  • Handles the upgrades of the application code plus the changes, such as database schema (for example, Flux is a continuous delivery solution for Kubernetes that allows automating updates to configuration when there is new code to deploy)
  • Can manage a cluster of database servers (for example, MariaDB operator creates MariaDB server and database easily by defining simple custom resource)
  • Can install a database cluster of a declared software version and number of members
  • Scale applications in or out
  • Continues to monitor an application as it runs (for example, Prometheus Operator simplifies the deployment and configuration of Prometheus, Alertmanager, and related monitoring components)
  • Initiate upgrades, automated backups, and failure recovery, simulating failure in all or part of your cluster to test its resilience
  • Allows you to publish a service to applications that don’t support Kubernetes APIs to discover them

How does an Operator work?

Operators work by extending the Kubernetes control plane and API server. Operators allows you to define a Custom Controller that watches your application and performs custom tasks based on its state. The application you want to watch is usually defined in Kubernetes as a new object: a Custom Resource (CR) that has its own YAML spec and object type that is well understood by the API server. That way, you can define any specific criteria in the custom spec to watch out for, and reconcile the instance when it doesn’t match the spec. The way an Operator’s controller reconciles against a spec is very similar to native Kubernetes controllers, though it is using mostly custom components.

What is the Redis Enterprise Operator?

Redis has created an Operator that deploys and manages the lifecycle of a Redis Enterprise Cluster. The Redis Enterprise Operator is the fastest, most efficient way to deploy and maintain a Redis Enterprise cluster in Kubernetes. The Operator creates, configures, and manages Redis Enterprise deployments from a single Kubernetes control plane. This means that you can manage Redis Enterprise instances on Kubernetes just by creating native objects, such as a Deployment, ReplicaSet, StatefulSet, etc. Operators allow full control over the Redis Enterprise cluster lifecycle.

The Redis Enterprise Operator acts as a custom controller for the custom resource Redis Enterprise Cluster, or "REC", which is defined through a Kubernetes CRD (custom resource definition) and deployed with a YAML file. The Redis Enterprise Operator functions as the logic "glue" between the Kubernetes infrastructure and the Redis Enterprise cluster.

How does the Redis Enterprise Operator work?

alt_text

The Redis Enterprise Operator supports two Custom Resource Definitions (CRDs):

  • Redis Enterprise Cluster (REC): An API to create Redis Enterprise clusters. Note that only one cluster is supported per Operator deployment.
  • Redis Enterprise Database (REDB): An API to create Redis databases running on the Redis Enterprise cluster. Note that the Redis Enterprise Operator is namespaced. A high-level architecture and overview of the solution can be found here.

This is how it works:

  1. First, the Redis Enterprise cluster custom resource (“CR” for short) is read and validated by the operator for a cluster specification.
  2. Secondly, cluster StatefulSet, service rigger, cluster admin secrets, RS/UI services are created.
  3. A Redis Enterprise Database CR is read and validated by the operator.
  4. The database is created on the cluster and the database access credentials are stored in a Kubernetes secret object.
  5. The service rigger discovers the new database and configures the Kubernetes service for the database.
  6. An application workload uses the database secret and service for access to data.

Example of Operator automation

Consider the YAML file below:

apiVersion: app.redislabs.com/v1
kind: RedisEnterpriseCluster
metadata:
  name: rec
spec:
  # Add fields here
  nodes: 3

If you change the number of nodes to 5, the Operator talks to StatefulSets, and changes the number of replicas from 3 to 5. Once that happens, Kubernetes will take over and bootstrap new Nodes one at a time, deploying Pods accordingly. As each becomes ready, the new Nodes join the cluster and become available to Redis Enterprise master Nodes.
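Applying the updated spec is an ordinary kubectl operation (the file name here is assumed):

kubectl apply -f redis-enterprise-cluster.yaml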

alt_text

apiVersion: app.redislabs.com/v1
kind: RedisEnterpriseDatabase
metadata:
  name: redis-enterprise-database
spec:
  redisEnterpriseCluster:
    name: redis-enterprise
  memorySize: 2GB

alt_text

To create a database, the Operator reads and validates the REDB resource, then talks to the cluster REST API to create the database on the cluster. The service rigger then discovers the new database and creates a Kubernetes service endpoint for it, which applications can use to connect.

In the next tutorial, you will learn how to get started with the Redis Enterprise Kubernetes Operator from scratch, including how to perform non-trivial tasks such as backup, restore, horizontal scaling, and much more. Stay tuned!

References


Getting started with Redis Functions


Profile picture for Elena Kolevska
Author:
Elena Kolevska, Technical Enablement Manager, EMEA at Redis

The most impactful addition to Redis version 7.0 is Redis Functions - a new programmability option, improving on scripts by adding modularity, reusability, and better overall developer experience.

Functions are, in contrast to scripts, persisted in the .rdb and .aof files as well as automatically replicated to all the replicas, which makes them a first-class citizen of Redis.

Redis has the capability of supporting multiple execution engines, so in a future release we’ll be able to write Redis Functions in Lua, JavaScript, and more languages; at the moment (Redis v7.0) the only supported language is Lua.

A common pain point for developers is maintaining a consistent view of data entities through a logical schema, and Redis Functions are ideally suited for solving this problem. In this tutorial, we will demonstrate just that by creating a library with two functions: the first will automatically set _created_at and _updated_at timestamps for hash keys, and the second will simply update the _updated_at timestamp without changing the other elements, simulating the Unix "touch" function. Let's go!

Environment setup

First, let’s set up a working environment with Redis 7. You can follow the installation instructions in the guides below, according to your operating system:

Alternatively, you can spin up a Docker container with Redis Stack:

$ docker run -p 6379:6379 --name redis-7.0 -it --rm redis/redis-stack:7.0.0-RC4
note

In the rest of this tutorial we’ll use the $ character to indicate that the command needs to be run on the command prompt and redis-cli> to indicate the same for a redis-cli prompt.

Warm-Up

Now that we have our Redis server running, we can create a file named mylib.lua and, in it, a function named hset that receives the keys and arguments we pass on the command line as parameters.

Functions in Redis are always a part of a library, and a single library can have multiple functions.

For starters, let's create a simple function that returns "Hello Redis 7.0" and save it in the mylib.lua file.

#!lua name=mylib

local function hset(keys, args)
  return "Hello Redis 7.0"
end

The first line specifies that we want to use the Lua engine to run this function and that the library's name is mylib. The name is the library identifier, and we will use it every time we need to update the library.

Next, we need to register this function so it can be accessed through the Functions API. In the registration, we specify that the function hset can be called with the name my_hset:

redis.register_function('my_hset', hset)

The full code, so far, is:

#!lua name=mylib
local function hset(keys, args)
  return "Hello Redis 7.0"
end

redis.register_function('my_hset', hset)

Before we can call the function on the command line, we need to load and register it with the Redis server:

$ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD

Finally, let’s run the function we registered:

redis-cli> FCALL my_hset 1 foo

You should see the greeting "Hello Redis 7.0" as a response.
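You can also confirm that the library and its function were registered by running the FUNCTION LIST command (output abridged; the exact fields vary slightly between Redis 7.x releases):

redis-cli> FUNCTION LIST
1) 1) "library_name"
   2) "mylib"
   3) "engine"
   4) "LUA"
   ...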

Maintaining a consistent view of data entities through a logical schema

We're now ready to start working on the requirement. First, let's implement the part that adds an _updated_at timestamp:

#!lua name=mylib

local function hset(keys, args)
  local hash = keys[1] -- Get the key name
  local time = redis.call('TIME')[1] -- Get the current time from the Redis server

  -- Add the current timestamp to the arguments that the user passed to the function, stored in `args`
  table.insert(args, '_updated_at')
  table.insert(args, time)

  -- Run HSET with the updated argument list
  return redis.call('HSET', hash, unpack(args))
end

redis.register_function('my_hset', hset)

After you update the code, you will have to reload the library in the Redis server, using the REPLACE argument to specify that you want to overwrite the previous version:

$ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD REPLACE

If you try to create and update a hash through our function now, you will see that a timestamp is automatically added to it:

redis-cli> FCALL my_hset 1 foo k1 v1 k2 v2
3

redis-cli> HGETALL foo
1) "k1"
2) "v1"
3) "k2"
4) "v2"
5) "_updated_at"
6) "1643581494"

If we try to update the same key, we will see that the _updated_at timestamp got updated too:

redis-cli> FCALL my_hset 1 foo k4 v4
1

redis-cli> HGETALL foo
1) "k1"
2) "v1"
3) "k2"
4) "v2"
5) "_updated_at"
6) "1643581580"
7) "k4"
8) "v4"

Now let's add the logic that checks if the key is being created or updated, and adds the _created_at timestamp accordingly:

#!lua name=mylib
local function hset(keys, args)
    local hash = keys[1] -- Get the key name
    local time = redis.call('TIME')[1] -- Get the current time from the Redis server

    -- Check if the key exists and, if not, add a `_created_at` timestamp
    local exists = redis.call('EXISTS', hash)
    if exists == 0 then
        table.insert(args, '_created_at')
        table.insert(args, time)
    end

    -- Add the current timestamp to the arguments that the user passed to the function, stored in `args`
    table.insert(args, '_updated_at')
    table.insert(args, time)

    -- Run HSET with the updated argument list
    return redis.call('HSET', hash, unpack(args))
end

redis.register_function('my_hset', hset)

Reload the library:

$ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD REPLACE

And try to create a new key:

redis-cli> FCALL my_hset 1 bar k1 v1 k2 v2
4

redis-cli> HGETALL bar
1) "k1"
2) "v1"
3) "k2"
4) "v2"
5) "_updated_at"
6) "1643581710"
7) "_created_at"
8) "1643581710"

Both _created_at and _updated_at timestamps were added. If we update the key, we will see that the _updated_at timestamp changes, while _created_at stays the same:

redis-cli> FCALL my_hset 1 bar k4 v4
1

redis-cli> HMGET bar _created_at _updated_at
1) "1643581710"
2) "1643581992"

The second requirement was to implement a function that allows us to update the _updated_at timestamp without touching any other fields. For that, we'll create a new function in our library:

local function touch(keys, args)
    local time = redis.call('TIME')[1]
    return redis.call('HSET', keys[1], '_updated_at', time)
end

And we should also add the function registration:

redis.register_function('my_touch', touch)

The full code will now look like this:

#!lua name=mylib
local function hset(keys, args)
    local hash = keys[1] -- Get the key name
    local time = redis.call('TIME')[1] -- Get the current time from the Redis server

    local exists = redis.call('EXISTS', hash)
    if exists == 0 then
        table.insert(args, '_created_at')
        table.insert(args, time)
    end

    -- Add the current timestamp to the arguments that the user passed to the function, stored in `args`
    table.insert(args, '_updated_at')
    table.insert(args, time)

    -- Run HSET with the updated argument list
    return redis.call('HSET', hash, unpack(args))
end

local function touch(keys, args)
    local time = redis.call('TIME')[1]
    return redis.call('HSET', keys[1], '_updated_at', time)
end

redis.register_function('my_hset', hset)
redis.register_function('my_touch', touch)

Reload the updated library:

$ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD REPLACE

And try running the new function and confirm that the _updated_at timestamp has indeed changed:

redis-cli> FCALL my_touch 1 bar
0

redis-cli> HMGET bar _created_at _updated_at
1) "1643581710"
2) "1643582276"

Thinking ahead

One of the basic rules of software development is that you cannot rely on user input, so let's make sure we don't. If a user creates a string named my_string and tries to run our my_hset function on it, they will get an error:

redis-cli> SET my_string hello
OK

redis-cli> FCALL my_hset 1 my_string k1 v1
(error) WRONGTYPE Operation against a key holding the wrong kind of value script: my_hset, on @user_function:17.

Let’s handle this error by adding a type check:

if exists == 1 and redis.call('TYPE', hash)["ok"] ~= 'hash' then
    local error = 'The key ' .. hash .. ' is not a hash'
    redis.log(redis.LOG_WARNING, error)
    return redis.error_reply(error)
end

The complete code:

#!lua name=mylib
local function hset(keys, args)
    local hash = keys[1] -- Get the key name
    local time = redis.call('TIME')[1] -- Get the current time from the Redis server

    local exists = redis.call('EXISTS', hash)
    if exists == 0 then
        table.insert(args, '_created_at')
        table.insert(args, time)
    end

    if exists == 1 and redis.call('TYPE', hash)["ok"] ~= 'hash' then
        local error = 'The key ' .. hash .. ' is not a hash'
        redis.log(redis.LOG_WARNING, error)
        return redis.error_reply(error)
    end

    -- Add the current timestamp to the arguments that the user passed to the function, stored in `args`
    table.insert(args, '_updated_at')
    table.insert(args, time)

    -- Run HSET with the updated argument list
    return redis.call('HSET', hash, unpack(args))
end

local function touch(keys, args)
    local time = redis.call('TIME')[1]
    return redis.call('HSET', keys[1], '_updated_at', time)
end

redis.register_function('my_hset', hset)
redis.register_function('my_touch', touch)

If we reload the library and try again, we'll get an error with a helpful message:

$ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD REPLACE

redis-cli> FCALL my_hset 1 my_string
(error) The key my_string is not a hash

Refactoring

You may notice some code repetition in our library, namely the type check and the fetching of the timestamp in both functions. That's a good opportunity for some code reuse, so let's extract that logic into helper functions:

local function get_time()
    return redis.call('TIME')[1]
end

local function is_not_hash(key_name)
    if redis.call('TYPE', key_name)['ok'] ~= 'hash' then
        return 'The key ' .. key_name .. ' is not a hash.'
    end

    return nil
end

These helper functions are only going to be called by our two existing functions and never from the outside, which is why we don't need to register them. The refactored code will now look like this:

#!lua name=mylib

-- Get the current time from the Redis server
local function get_time()
    return redis.call('TIME')[1]
end

local function is_not_hash(key_name)
    if redis.call('TYPE', key_name)['ok'] ~= 'hash' then
        return 'The key ' .. key_name .. ' is not a hash.'
    end

    return nil
end

local function hset(keys, args)
    local hash = keys[1] -- Get the key name
    local time = get_time()

    local exists = redis.call('EXISTS', hash)
    if exists == 0 then
        table.insert(args, '_created_at')
        table.insert(args, time)
    end

    local hash_error = is_not_hash(hash)
    if exists == 1 and hash_error ~= nil then
        return redis.error_reply(hash_error)
    end

    -- Add the current timestamp to the arguments that the user passed to the function, stored in `args`
    table.insert(args, '_updated_at')
    table.insert(args, time)

    -- Run HSET with the updated argument list
    return redis.call('HSET', hash, unpack(args))
end

local function touch(keys, args)
    local hash = keys[1]

    local hash_error = is_not_hash(hash)
    if hash_error ~= nil then
        return redis.error_reply(hash_error)
    end

    return redis.call('HSET', hash, '_updated_at', get_time())
end

redis.register_function('my_hset', hset)
redis.register_function('my_touch', touch)
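As with every previous change, reload the library and re-run one of the functions to confirm that the refactoring didn't change any behavior:

$ cat /path/to/mylib.lua | redis-cli -x FUNCTION LOAD REPLACE

redis-cli> FCALL my_touch 1 bar
0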

Using Function flags

In this step, we'll get familiar with Function flags: a piece of information that describes the circumstances under which a function is allowed to run. Redis currently supports five flags:

  • no-writes - this flag indicates that the script only reads data but never writes.
  • allow-oom - this flag allows a script to execute even when the server is out of memory (OOM).
  • allow-stale - this flag enables running the script against a stale replica.
  • no-cluster - this flag doesn't allow the script to run in a clustered database.
  • allow-cross-slot-keys - this flag allows a script to access keys from multiple slots.

To best illustrate why function flags are useful we'll work with a simple example that gets the basic info and favourite colours of a user. Save the following snippet in a file named get_user.lua:

#!lua name=mynewlib
local function get_user(keys, args)
    local hash = keys[1] -- Get the key name

    local user = redis.call('HGETALL', hash)
    local user_colours = redis.call('SMEMBERS', hash .. ':colours')

    table.insert(user, #user+1, 'colours')
    table.insert(user, #user+1, user_colours)

    return user
end

redis.register_function('get_user', get_user)
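Before loading and testing the library, let's also seed the sample data the function expects; the values below are just for illustration:

redis-cli> HSET user:1 email foo@bar.com
redis-cli> SADD user:1:colours green red blue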

If we try to execute this function with FCALL_RO, the read-only variant of FCALL, we will get an error, even though the function only performs read operations:

$ cat /path/to/get_user.lua | redis-cli -x FUNCTION LOAD

redis-cli> FCALL_RO get_user 1 user:1
(error) ERR Can not execute a script with write flag using *_ro command.

To declare that the function is read-only, we need to use the no-writes flag in its registration:
#!lua name=mynewlib

local function get_user(keys, args)
    local hash = keys[1] -- Get the key name

    local user = redis.call('HGETALL', hash)
    local user_colours = redis.call('SMEMBERS', hash .. ':colours')

    table.insert(user, #user+1, 'colours')
    table.insert(user, #user+1, user_colours)

    return user
end

redis.register_function{
    function_name='get_user',
    callback=get_user,
    flags={ 'no-writes' }
}

Finally, this will give us the expected result:

$ cat /path/to/get_user.lua | redis-cli -x FUNCTION LOAD REPLACE

redis-cli> FCALL_RO get_user 1 user:1
1) "email"
2) "foo@bar.com"
3) "colours"
4) 1) "green"
   2) "red"
   3) "blue"

That's it! You now know how to write, load, and execute Redis Functions. Congratulations!

For more information on Redis Functions you can check the Redis Functions documentation and to learn more about the Lua API you can check the Redis Lua API Reference.

- + \ No newline at end of file diff --git a/create/windows/index.html b/create/windows/index.html index d5cf8bee15..9d2ecfcdcb 100644 --- a/create/windows/index.html +++ b/create/windows/index.html @@ -4,7 +4,7 @@ How to Install Redis on Windows | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to Install Redis on Windows


Author: Ajeet Raina, Former Developer Growth Manager at Redis

You can run Redis on Windows 10 using the Windows Subsystem for Linux (a.k.a. WSL2). WSL2 is a compatibility layer for running Linux binary executables natively on Windows 10 and Windows Server 2019. WSL2 lets developers run a GNU/Linux environment (including command-line tools, utilities, and applications) directly on Windows.

Follow these instructions to run a Redis database on Microsoft Windows 10.

Step 1: Turn on Windows Subsystem for Linux

In Windows 10, Microsoft replaced Command Prompt with PowerShell as the default shell. Open PowerShell as Administrator and run this command to enable Windows Subsystem for Linux (WSL):

 Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Windows-Subsystem-Linux

Reboot Windows after making the change — note that you only need to do this once.
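As an aside, on recent Windows 10 builds you can enable WSL and install a default Ubuntu distribution in a single step (run this from an elevated PowerShell; availability depends on your Windows build):

 wsl --install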

Step 2: Launch Microsoft Windows Store

 start ms-windows-store:

Then search for Ubuntu, or your preferred distribution of Linux, and download the latest version.

Step 3: Install Redis server

Installing Redis is simple and straightforward. The following example works with Ubuntu; run these commands inside your Ubuntu (WSL) terminal (you'll need to wait for initialization and create a login upon first use):

 sudo apt-add-repository ppa:redislabs/redis
sudo apt-get update
sudo apt-get upgrade
sudo apt-get install redis-server
note

The sudo command may or may not be required based on the user configuration of your system.

Step 4: Restart the Redis server

Restart the Redis server as follows:

 sudo service redis-server restart
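If you want to confirm the service state before connecting, you can check it as follows (assuming Ubuntu's default service tooling):

 sudo service redis-server status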

Step 5: Verify if your Redis server is running

Use the redis-cli command to test connectivity to the Redis database.

 $ redis-cli
127.0.0.1:6379> set user:1 "Jane"
OK
127.0.0.1:6379> get user:1
"Jane"
note

By default, Redis ships with 16 databases (indexes 0-15); you can change that number with the databases NUMBER directive in redis.conf.

Step 6: Stop the Redis Server

 sudo service redis-server stop

Next Steps

Model Redis data in your .NET Applications


References

Redis University

Check out this video if you want to see Redis on Windows 10 Home Edition in action.

- + \ No newline at end of file diff --git a/devcember/index.html b/devcember/index.html index bbca6b5d9b..905e3fb08b 100644 --- a/devcember/index.html +++ b/devcember/index.html @@ -4,7 +4,7 @@ Redis DEVcember | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Redis DEVcember


Author: Suze Shardlow, Developer Community Manager at Redis
Author: Simon Prickett, Principal Developer Advocate at Redis

What's it all About?

We're excited to announce DEVcember, a month-long festival of live online events and fun challenges, showcasing Redis and our community!

Join us on Twitch or YouTube for a daily 15-20 minute live stream, Monday to Friday, where we'll introduce Redis concepts, share projects that you can contribute to, and have some fun banter. For Saturday and Sunday, we'll give you a mini challenge to complete.

Don't worry if you miss a stream - we'll post them on YouTube and embed them in the schedule below.

The fun begins Wednesday 1st December 2021 at 9am UTC.

Live Schedule

We'll be on the Redis channel on Twitch and YouTube each working day in December… times will vary and we'll update the schedule below daily, revealing each day's topic as we go.

Date/Time | Topic | Join Us!
Wed 1 Dec, 9am UTC | Welcome to DEVcember, Get Started with Redis in the Cloud |
Thu 2 Dec, 10.30am UTC | Up and Running with RedisInsight |
Fri 3 Dec, 8pm UTC | The Redis List goes on and on! |
Sat 4 / Sun 5 Dec | First weekend hands-on exercise | Take the challenge on GitHub
Mon 6 Dec, 4:30pm UTC | Let's try out Redis OM for Python |
Tue 7 Dec, 5pm UTC | The Scoop on Big O Notation |
Wed 8 Dec, 4:30pm UTC | Get! Set! Go! |
Thu 9 Dec, 10:30am UTC | Have I Seen You Before? Introducing Bloom Filters |
Fri 10 Dec, 6pm UTC | You Can (Mostly) Count on Hyperloglog! |
Sat 11 / Sun 12 Dec | Second weekend hands-on exercise | Take the challenge on GitHub
Mon 13 Dec, 4pm UTC | Sort it out! All about Sorted Sets |
Tue 14 Dec, 4:45pm UTC | What's the Score? Top K with Redis Bloom! |
Wed 15 Dec, 10am UTC | Seek and You May Find… Introducing Redis Search! (Part 1) |
Wed 15 Dec, 10am UTC | Seek and You May Find… Introducing Redis Search! (Part 2) |
Thu 16 Dec, 3:45pm UTC | Introducing Redis OM for Node.js |
Fri 17 Dec, 4pm UTC | Object Mapping and More! Redis OM for .NET |
Sat 18 / Sun 19 Dec | Third weekend hands-on exercise | Take the challenge on GitHub
Mon 20 Dec, 1pm UTC | Don't Cross the (Redis) Streams! |
Tue 21 Dec, 5pm UTC | What's the deal with Pub/Sub? |
Wed 22 Dec, 5:15pm UTC | Spring into Redis OM! (Redis OM for Java/Spring Framework) |
Thu 23 Dec, 5pm UTC | Finding your way with Redis Geospatial! |
Fri 24 Dec, 9:15am UTC | Herding Cats with Redis JSON |

Meet the Team

Your regular presenters are:

Suze Shardlow, Developer Community Manager

Suze leads developer community at Redis. She’s a software engineer, published tech author and event MC who has spoken at several global tech conferences. When she’s not talking databases and putting together content, she loves crafting!

Justin Castilla, Developer Advocate

Justin is a Developer Advocate at Redis. He has helped to produce several courses at Redis University and has created numerous videos for the Redis YouTube channel.

Simon Prickett, Developer Advocacy Manager

Simon Prickett is the Developer Advocacy Manager at Redis. He began his career writing C++ for Hewlett-Packard Labs, and has subsequently held senior roles with companies ranging from startups to enterprises including Capital One, USA Today, and New Zealand’s Customs Service. Away from professional life Simon enjoys traveling, cycling, and building devices with microcontrollers.


We'll also feature guest appearances from other members of the Redis developer relations team!

Join the Fun!

Be sure to follow us on Twitch to be notified when we're online! We'd also love to see you on the Redis Discord, where there's a dedicated channel for all things DEVcember. This is where you can chat with us throughout the day and get help and information about the fun Redis commands / coding challenges we'll post for you to try on weekends.

If you're on Twitter, use hashtag #RedisDEVcember to join the conversation.

Learn More

To learn more about Redis and get yourself a certificate that you can add to your LinkedIn profile, check out our free online courses at Redis University which are available all year round. There's something for everyone!

- + \ No newline at end of file diff --git a/develop/C/index.html b/develop/C/index.html index 6313de3795..e15cf474c8 100644 --- a/develop/C/index.html +++ b/develop/C/index.html @@ -4,7 +4,7 @@ C and Redis | The Home of Redis Developers - + @@ -14,7 +14,7 @@

C and Redis


Author: Ajeet Raina, Former Developer Growth Manager at Redis

Find tutorials, examples and technical articles that will help you to develop with Redis and C.

Getting Started

In order to use Redis with C, you need a C Redis client. For your first steps with C and Redis, this article will show how to use the recommended library: hiredis.

Hiredis is a minimalistic C client library for the Redis database. It is minimalistic because it only adds minimal support for the protocol; at the same time, it uses a high-level printf-like API, making it much higher level than its minimal code base (and the lack of explicit bindings for every Redis command) would suggest.

Step 1. Install the pre-requisites

Version 1.0.0 marks the first stable release of hiredis. Follow the steps below to install the prerequisite packages needed to compile the latest version of hiredis. For example, on macOS you can install them with Homebrew:

 brew install gcc make

Then run the following command to start the Redis server:

 redis-server

Step 2. Install and compile hiredis

 wget https://github.com/redis/hiredis/archive/master.zip
unzip master.zip
cd hiredis-master
make
make install

Step 3. Copy the below C code into a file named redistest.c:

 #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hiredis/hiredis.h>

int main(int argc, char **argv) {
    redisReply *reply;
    redisContext *c;

    /* Connect to the local Redis server started above (default port 6379) */
    c = redisConnect("127.0.0.1", 6379);
    if (c->err) {
        printf("error: %s\n", c->errstr);
        return 1;
    }

    /* PING the server and print the response */
    reply = redisCommand(c, "PING %s", "Hello World");
    printf("RESPONSE: %s\n", reply->str);
    freeReplyObject(reply);

    redisFree(c);
    return 0;
}

Step 4. Compile the code

 gcc redistest.c -o redistest -I /usr/local/include/hiredis -lhiredis

Step 5. Test the code

 ./redistest
RESPONSE: Hello World

More C Clients Resources

  • hiredis-cluster - C client library for Redis Cluster

  • libredis - A C based general low-level PHP extension and client library for Redis, focusing on performance, generality and efficient parallel communication with multiple Redis servers.

  • hiredispool - Provides connection pooling and auto-reconnect for hiredis. It is also minimalistic and easy to do customization.

- + \ No newline at end of file diff --git a/develop/deno/index.html b/develop/deno/index.html index fc96156deb..82aaa1c612 100644 --- a/develop/deno/index.html +++ b/develop/deno/index.html @@ -4,7 +4,7 @@ Deno and Redis | The Home of Redis Developers - + @@ -13,7 +13,7 @@

Deno and Redis


Author: Ajeet Raina, Former Developer Growth Manager at Redis

With over 80,000 stars and 670+ contributors, Deno is a popular modern runtime for JavaScript and TypeScript. It is built on V8, an open-source JavaScript engine developed by the Chromium Project for Google Chrome and Chromium web browsers.


Features of Deno

  • Deno is secure by default. It executes code in a sandbox environment, disallowing runtime access to the underlying filesystem, environment variables and scripts.
  • Deno supports both JavaScript and TypeScript out of the box.
  • Deno ships as a single executable with no dependencies.
  • Comes with built-in utilities such as a dependency inspector (deno info) and a code formatter (deno fmt).

Getting Started

deno.land/x is a hosting service for Deno scripts. It caches releases of open-source modules stored on GitHub and serves them at one easy-to-remember domain. These modules contain small scripts that demonstrate the use of Deno and its standard modules.

The basic format of code URLs is

https://deno.land/x/IDENTIFIER@VERSION/FILE_PATH

Example:

https://deno.land/std@0.126.0/examples

In order to use Redis with Deno you will need a Deno Redis client. In the following sections, we will demonstrate the use of an experimental implementation of a Redis client for Deno.

Step 1. Set up a free Redis Enterprise Cloud account

Visit redis.com/try-free and create a free Redis Enterprise Cloud account. Once your database is created, save the database endpoint URL and password for future reference.

tip

For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

🎉 Click here to sign up

Database details

Step 2. Get Deno

brew install deno
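If you're not using Homebrew, Deno's official install script is an alternative (it downloads and installs the deno executable):

curl -fsSL https://deno.land/install.sh | sh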

Step 3. Verify if Deno is properly installed

deno -V
deno 1.19.0

Step 4. Create a file named redis.ts with the following content

The following code creates a connection to Redis using Deno:

import { connect } from 'https://deno.land/x/redis/mod.ts';

const redis = await connect({
  hostname: 'redis-18386.c110-qa.us-east-1-1.ec2.qa-cloud.redislabs.com',
  port: 18386,
  password: 'XXXX',
});

const ok = await redis.set('foo', 'bar');
const foo = await redis.get('foo');
console.log(foo); // print the value of foo so we can see the result

Replace the values of hostname, port, and password to match those of your Redis database.

Step 5. Executing the script

Deno can grab scripts from multiple sources: you can provide a filename, a URL, or '-' to read the file from stdin. You run a JavaScript or TypeScript program by executing deno run.

deno run --allow-net redis.ts

When you run the script, the value of foo should be output. You can also verify that the commands reached the server by running the MONITOR command in another redis-cli session:

redis-15692.c264.ap-south-1-1.ec2.cloud.redislabs.com:15692> monitor
OK
1646536310.435577 [0 122.171.165.94:50193] "AUTH" "(redacted)"
1646536310.475578 [0 122.171.165.94:50193] "SET" "foo" "bar"
1646536310.511578 [0 122.171.165.94:50193] "GET" "foo"

Additional references:

Redis Launchpad
- + \ No newline at end of file diff --git a/develop/dotnet/aspnetcore/caching/basic-api-caching/index.html b/develop/dotnet/aspnetcore/caching/basic-api-caching/index.html index 8295ce8640..37c23c77a2 100644 --- a/develop/dotnet/aspnetcore/caching/basic-api-caching/index.html +++ b/develop/dotnet/aspnetcore/caching/basic-api-caching/index.html @@ -4,7 +4,7 @@ How to add a basic API Cache to your ASP.NET Core application | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to add a basic API Cache to your ASP.NET Core application

Redis is synonymous with caching, and for good reason: Redis is fast, easy to get up and running with, and does an excellent job as a cache.

There are two big reasons to use a cache over the source of truth.

  1. Time - caches are much faster
  2. Cost - sometimes going to a source of truth has a monetary cost. For example, API endpoints sometimes charge per request. This means that we want to limit unnecessary requests to a particular endpoint.

In the second case, unnecessary requests to the API endpoint are wasteful and can add up to a high financial cost to the application over time. Therefore, in this tutorial, we will look at caching the results of API requests to prevent us from having to make round trips to an API.

For our example, we will use the US National Weather Service's (NWS) Weather API - which is free and requires no authentication beyond a user-agent. We will build an API to get a weather forecast based on latitude and longitude using ASP.NET Core.

Prerequisites

Start Redis

Let's start by starting Redis; for development purposes, you can just use Docker:

docker run -p 6379:6379 redis

If you are getting ready to deploy to production, you may want to make use of Redis Cloud.

Create the Project

Next, we'll create the ASP.NET Core API project using the .NET CLI.

dotnet new webapi -n BasicWeatherCacheApp

Then we'll cd into the BasicWeatherCacheApp directory that we just created, and we will add the StackExchange.Redis package to the project:

dotnet add package StackExchange.Redis

Add Redis Cache to ASP.NET Core app

Open up the Program.cs file. This is where the services are all defined and injected into the project. Add the following to register the StackExchange.Redis ConnectionMultiplexer with the ASP.NET Core application, as well as an HttpClient (you'll also need using StackExchange.Redis; at the top of the file):

builder.Services.AddSingleton<IConnectionMultiplexer>(ConnectionMultiplexer.Connect("localhost"));
builder.Services.AddHttpClient();

Create Data Structures to Hold Results

The resulting structure from the NWS is a bit verbose, but we will endeavor to just capture the future forecasts for a particular area.

We'll create two structures: the first will contain the actual forecast, and the second will hold the list of forecasts from a given request, as well as the time it took to accumulate them. For the first, we'll use the default WeatherForecast class that's created with the template; open up WeatherForecast.cs and replace its contents with:

using System.Text.Json.Serialization;

public class WeatherForecast
{
    [JsonPropertyName("number")]
    public int Number { get; set; }

    [JsonPropertyName("name")]
    public string Name { get; set; }

    [JsonPropertyName("startTime")]
    public DateTime StartTime { get; set; }

    [JsonPropertyName("endTime")]
    public DateTime EndTime { get; set; }

    [JsonPropertyName("isDayTime")]
    public bool IsDayTime { get; set; }

    [JsonPropertyName("temperature")]
    public int Temperature { get; set; }

    [JsonPropertyName("temperatureUnit")]
    public string? TemperatureUnit { get; set; }

    [JsonPropertyName("temperatureTrend")]
    public string? TemperatureTrend { get; set; }

    [JsonPropertyName("windSpeed")]
    public string? WindSpeed { get; set; }

    [JsonPropertyName("windDirection")]
    public string? WindDirection { get; set; }

    [JsonPropertyName("shortForecast")]
    public string? ShortForecast { get; set; }

    [JsonPropertyName("detailedForecast")]
    public string? DetailedForecast { get; set; }
}

Next, create the file ForecastResult.cs and add the following to it:

public class ForecastResult
{
    public long ElapsedTime { get; }
    public IEnumerable<WeatherForecast> Forecasts { get; }

    public ForecastResult(IEnumerable<WeatherForecast> forecasts, long elapsedTime)
    {
        Forecasts = forecasts;
        ElapsedTime = elapsedTime;
    }
}

Dependency Injection Into the Weather Forecast Controller

Now that we've set up our app, we need to configure our controller. First, open Controllers/WeatherForecastController.cs (this controller is created automatically with the template) and add the following code to inject what we need into it (this uses types from StackExchange.Redis and System.Net.Http.Headers):

private readonly HttpClient _client;
private readonly IDatabase _redis;

public WeatherForecastController(HttpClient client, IConnectionMultiplexer muxer)
{
    _client = client;
    _client.DefaultRequestHeaders.UserAgent.Add(new ProductInfoHeaderValue("weatherCachingApp", "1.0"));
    _redis = muxer.GetDatabase();
}

Query the API

To query the Weather API for the forecast at a particular latitude and longitude, we need to go through a two-step process, because there's no single API for querying the forecast by geolocation. Instead, every geolocation is assigned to a particular NWS office, and each office has a 2D grid that a specific latitude and longitude map to. Fortunately, there's a points API endpoint to which you can pass your latitude and longitude; it gives you the office responsible for that point and the x/y grid coordinates of the point within that office's grid. You then query the forecast endpoint for those grid coordinates at that office and pull out the forecasted periods. The following accomplishes all of this (JsonObject comes from System.Text.Json.Nodes, and GetFromJsonAsync from System.Net.Http.Json):

private async Task<string> GetForecast(double latitude, double longitude)
{
    // Look up the office and grid coordinates for this point
    var pointsRequestQuery = $"https://api.weather.gov/points/{latitude},{longitude}";
    var result = await _client.GetFromJsonAsync<JsonObject>(pointsRequestQuery);
    var gridX = result["properties"]["gridX"].ToString();
    var gridY = result["properties"]["gridY"].ToString();
    var gridId = result["properties"]["gridId"].ToString();

    // Query the forecast for that office and grid square, and pull out the periods
    var forecastRequestQuery = $"https://api.weather.gov/gridpoints/{gridId}/{gridX},{gridY}/forecast";
    var forecastResult = await _client.GetFromJsonAsync<JsonObject>(forecastRequestQuery);
    var periodsJson = forecastResult["properties"]["periods"].ToJsonString();
    return periodsJson;
}

Write the Forecast Action

Given the multiple API calls, it's clear why caching is critical for our application. These forecasts update infrequently, roughly every 1-3 hours, so making back-to-back API requests for the same data is expensive in both time and money. In the case of this API there's no financial cost per request, but a commercial API will often charge per request. When writing this action, we will check the cache first: if the cache contains the relevant forecast, we return it. Otherwise, we hit the API, save the result, and set the key to expire. We'll time the whole operation and reply with the result and the time it took.

[HttpGet(Name = "GetWeatherForecast")]
public async Task<ForecastResult> Get([FromQuery] double latitude, [FromQuery] double longitude)
{
    string json;
    var watch = Stopwatch.StartNew();
    var keyName = $"forecast:{latitude},{longitude}";

    // Check the cache first
    json = await _redis.StringGetAsync(keyName);
    if (string.IsNullOrEmpty(json))
    {
        // Cache miss: fetch from the API, then cache the result for an hour
        json = await GetForecast(latitude, longitude);
        var setTask = _redis.StringSetAsync(keyName, json);
        var expireTask = _redis.KeyExpireAsync(keyName, TimeSpan.FromSeconds(3600));
        await Task.WhenAll(setTask, expireTask);
    }

    var forecast = JsonSerializer.Deserialize<IEnumerable<WeatherForecast>>(json);
    watch.Stop();
    var result = new ForecastResult(forecast, watch.ElapsedMilliseconds);

    return result;
}
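As a side note, StackExchange.Redis can also set a value and its expiry in a single call, which avoids coordinating two tasks; a minimal alternative to the set/expire pair above:

// Set the value and its expiry in one round trip
await _redis.StringSetAsync(keyName, json, TimeSpan.FromSeconds(3600));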

Run the App

All that's left to do now is run the app. Run dotnet run in your console, open https://localhost:PORT_NUMBER/swagger/index.html, and use the GUI to send a request; alternatively, you can use cURL. The first time you send a new latitude and longitude, you'll notice that the request takes fairly long, around a second. When you make the same request again and it hits the cache, the time drops dramatically, to around 1-5 ms.
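For example (the port number and coordinates below are placeholder values; use the port your app reports on startup):

curl "http://localhost:5000/WeatherForecast?latitude=38.8894&longitude=-77.0352"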

Resources

  • Source code for this demo is located on GitHub
  • More documentation for the StackExchange.Redis library is located on its docs site
- + \ No newline at end of file diff --git a/develop/dotnet/aspnetcore/rate-limiting/fixed-window/index.html b/develop/dotnet/aspnetcore/rate-limiting/fixed-window/index.html index 267a60f7e7..d69ab7eb34 100644 --- a/develop/dotnet/aspnetcore/rate-limiting/fixed-window/index.html +++ b/develop/dotnet/aspnetcore/rate-limiting/fixed-window/index.html @@ -4,7 +4,7 @@ How to implement a Fixed Window Rate Limiting app using ASP.NET Core & Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to implement a Fixed Window Rate Limiting app using ASP.NET Core & Redis


Author: Steve Lorello, Senior Field Engineer at Redis

In this tutorial, we will build an app that implements basic fixed-window rate limiting using Redis & ASP.NET Core.

Prerequisites

Startup Redis

Before we begin, start up Redis. For this example, we'll use the Redis Docker image:

 docker run -dp 6379:6379 redis

Create Project

In your terminal, navigate to where you want the app to live and run:

 dotnet new webapi -n FixedRateLimiter --no-https

Change directory to FixedRateLimiter and run the below command:

dotnet add package StackExchange.Redis

Open the FixedRateLimiter.csproj file in Visual Studio or Rider (or open the folder in VS Code) and, in the Controllers folder, add an API controller called RateLimitedController. When all this is complete, RateLimitedController.cs should look like the following:

namespace FixedRateLimiter.Controllers
{
    [ApiController]
    [Route("api/[controller]")]
    public class RateLimitedController : ControllerBase
    {
    }
}

Initialize The Multiplexer

To use Redis, we're going to initialize an instance of the ConnectionMultiplexer from StackExchange.Redis. To do so, go to the ConfigureServices method inside Startup.cs and add the following line:

 services.AddSingleton<IConnectionMultiplexer>(ConnectionMultiplexer.Connect("localhost"));

Inject the ConnectionMultiplexer

In RateLimitedController.cs inject the ConnectionMultiplexer into the controller and pull out an IDatabase object from it with the following:

private readonly IDatabase _db;

public RateLimitedController(IConnectionMultiplexer mux)
{
    _db = mux.GetDatabase();
}

Add a Simple Route

We will add a simple route that we will rate limit; it will be a POST request route on our controller. This POST request will use Basic auth, meaning each request is expected to carry a header of the form Authorization: Basic <base64encoded>, where base64encoded is a string of the form apiKey:apiSecret, base64 encoded, e.g. Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==. This route will parse the key out of the header and return an OK result.

[HttpPost("simple")]
public async Task<IActionResult> Simple([FromHeader] string authorization)
{
    var encoded = string.Empty;
    if (!string.IsNullOrEmpty(authorization)) encoded = AuthenticationHeaderValue.Parse(authorization).Parameter;
    if (string.IsNullOrEmpty(encoded)) return new UnauthorizedResult();
    var apiKey = Encoding.UTF8.GetString(Convert.FromBase64String(encoded)).Split(':')[0];
    return Ok();
}

With that set up, run the project with dotnet run; if you issue a POST request to http://localhost:5000/api/RateLimited/simple with apiKey foobar and password password, you will get a 200 OK response back.

You can use this cURL request to elicit that response:

 curl -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/RateLimited/simple

Fixed Window Rate Limiting Lua Script

We are going to build a fixed window rate limiting script. A fixed window rate limiter limits the number of requests in a particular window in time. In our example, we will limit the number of requests to a specific route for a specific API key. For example, if the apiKey foobar hits our route api/ratelimited/simple at 12:00:05 and we have a 60-second window allowing no more than ten requests, we need to:

  1. Format a key from our info, e.g. Route:ApiKey:time-window - in our case, this would be api/ratelimited/simple:foobar:12:00
  2. Increment the current value of that key
  3. Set the expiration of that key to 60 seconds
  4. If the incremented value of the key is less than or equal to the max requests allowed, return 0 (not rate limited)
  5. Otherwise, return 1 (rate limited)

The issue we need to contend with here is that this rate limiting requires atomicity across all our commands (e.g. between when we increment and when we check the key, we don't want anyone else coming in and hitting it). Because of this, we will run everything on the server through a Lua script. There are two ways to write this Lua script. The traditional way drives everything off of positional keys and arguments, like the following:

 local key = KEYS[1]
local max_requests = tonumber(ARGV[1])
local expiry = tonumber(ARGV[2])
local requests = redis.call('INCR', key)
redis.call('EXPIRE', key, expiry)
if requests <= max_requests then
    return 0
else
    return 1
end

Alternatively, StackExchange.Redis supports a more readable mode of scripting that lets you name the arguments to your script, with the library taking care of filling in the appropriate items at execution time. That mode of scripting, which we will use here, produces this script:

 local requests = redis.call('INCR', @key)
redis.call('EXPIRE', @key, @expiry)
if requests <= tonumber(@maxRequests) then
    return 0
else
    return 1
end

Loading the Script

To run a Lua script with StackExchange.Redis, you need to prepare the script and run it. Consequently, add a new file Scripts.cs to the project, and in that file add a new static class called Scripts; this will contain a constant string with our script and a getter property that prepares the script for execution.

using StackExchange.Redis;

namespace FixedRateLimiter
{
    public static class Scripts
    {
        public static LuaScript RateLimitScript => LuaScript.Prepare(RATE_LIMITER);

        private const string RATE_LIMITER = @"
            local requests = redis.call('INCR', @key)
            redis.call('EXPIRE', @key, @expiry)
            if requests <= tonumber(@maxRequests) then
                return 0
            else
                return 1
            end
        ";
    }
}

Executing the Script

With the script set up, all that's left to do is build our key, run the script, and check the result. We already extracted the apiKey earlier, so we will use it, together with the request path and the current time, to create our key. Then we will run ScriptEvaluateAsync to execute the script and use its result to determine whether to return a 429 or our JSON result. Add the following just ahead of the return in our Simple method:

 var script = Scripts.RateLimitScript;
var key = $"{Request.Path.Value}:{apiKey}:{DateTime.UtcNow:hh:mm}";
var res = await _db.ScriptEvaluateAsync(script, new { key = new RedisKey(key), expiry = 60, maxRequests = 10 });
if ((int)res == 1)
    return new StatusCodeResult(429);
Our Simple route's code should look like this:

 [HttpPost("simple")]
public async Task<IActionResult> Simple([FromHeader] string authorization)
{
    var encoded = string.Empty;
    if (!string.IsNullOrEmpty(authorization)) encoded = AuthenticationHeaderValue.Parse(authorization).Parameter;
    if (string.IsNullOrEmpty(encoded)) return new UnauthorizedResult();
    var apiKey = Encoding.UTF8.GetString(Convert.FromBase64String(encoded)).Split(':')[0];
    var script = Scripts.RateLimitScript;
    var key = $"{Request.Path.Value}:{apiKey}:{DateTime.UtcNow:hh:mm}";
    var res = await _db.ScriptEvaluateAsync(script, new { key = new RedisKey(key), expiry = 60, maxRequests = 10 });
    if ((int)res == 1)
        return new StatusCodeResult(429);
    return new JsonResult(new { key });
}

Now, if we start our server back up with dotnet run and try running the following command:

 for n in {1..21}; do echo $(curl -s -w " HTTP %{http_code}, %{time_total} s" -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/ratelimited/simple); sleep 0.5; done

You will see some of your requests return a 200 and at least one request return a 429. How many of each depends on when you start sending requests: recall that the requests are time-boxed in single-minute windows, so if you cross into the next minute in the middle of the 21 requests, the counter resets. Hence, you should expect somewhere between 10 and 20 OK results and between 1 and 11 429 results. The response should look something like this:

 HTTP 200, 0.002680 s
HTTP 200, 0.001535 s
HTTP 200, 0.001653 s
HTTP 200, 0.001449 s
HTTP 200, 0.001604 s
HTTP 200, 0.001423 s
HTTP 200, 0.001492 s
HTTP 200, 0.001449 s
HTTP 200, 0.001551 s
{"status":429,"traceId":"00-16e9da63f77c994db719acff5333c509-f79ac0c862c5a04c-00"} HTTP 429, 0.001803 s
{"status":429,"traceId":"00-3d2e4e8af851024db121935705d5425f-0e23eb80eae0d549-00"} HTTP 429, 0.001521 s
{"status":429,"traceId":"00-b5e824c9ebc4f94aa0bda2a414afa936-8020a7b8f2845544-00"} HTTP 429, 0.001475 s
{"status":429,"traceId":"00-bd6237c5d0362a409c436dcffd0d4a7a-87b544534f397247-00"} HTTP 429, 0.001549 s
{"status":429,"traceId":"00-532d64033c54a148a98d8efe1f9f53b2-b1dbdc7d8fbbf048-00"} HTTP 429, 0.001476 s
{"status":429,"traceId":"00-8c210b1c1178554fb10aa6a7540d3488-0fedba48e38fdd4b-00"} HTTP 429, 0.001606 s
{"status":429,"traceId":"00-633178f569dc8c46badb937c0363cda8-ab1d1214b791644d-00"} HTTP 429, 0.001661 s
{"status":429,"traceId":"00-12f01e448216c64b8bfe674f242a226f-d90ff362926aa14e-00"} HTTP 429, 0.001858 s
{"status":429,"traceId":"00-63ef51cee3bcb6488b04395f09d94def-be9e4d6d6057754a-00"} HTTP 429, 0.001622 s
{"status":429,"traceId":"00-80a971db60fdf543941e2457e35ac2fe-3555f5cb9c907e4c-00"} HTTP 429, 0.001710 s
{"status":429,"traceId":"00-f718734ae0285343ac927df617eeef92-91a49e127f2e4245-00"} HTTP 429, 0.001582 s
{"status":429,"traceId":"00-9da2569cce4d714480dd4f0edc0506d2-8a1ce375b1a9504f-00"} HTTP 429, 0.001629 s

Resources

Redis Launchpad
- + \ No newline at end of file diff --git a/develop/dotnet/aspnetcore/rate-limiting/middleware/index.html b/develop/dotnet/aspnetcore/rate-limiting/middleware/index.html index f51531fbf8..a54d9e6d88 100644 --- a/develop/dotnet/aspnetcore/rate-limiting/middleware/index.html +++ b/develop/dotnet/aspnetcore/rate-limiting/middleware/index.html @@ -4,7 +4,7 @@ Configurable Sliding Window Rate Limiting Middleware for Redis & ASP.NET Core | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Configurable Sliding Window Rate Limiting Middleware for Redis & ASP.NET Core

Let's consider the case (which is probably most cases) where we have multiple endpoints we want to rate limit. It doesn't make much sense to embed rate limiting in the logic of the routes themselves; instead, we want something that intercepts requests and checks whether a request is rate limited before moving on to the appropriate endpoint. To accomplish this, we'll build some middleware for just this purpose, and with some light configuration work, it will handle a configurable set of limits.

Prerequisites

Startup Redis

Before we begin, start up Redis. For this example, we'll use the Redis Docker image:

docker run -p 6379:6379 redis

Create Project

In your terminal, navigate to where you want the app to live and run:

dotnet new webapi -n RateLimitingMiddleware --no-https

cd into the RateLimitingMiddleware folder and run the command dotnet add package StackExchange.Redis.

Open RateLimitingMiddleware.csproj in Rider or Visual Studio, or open the folder in VS Code. Then, in the Controllers folder, add an API controller called RateLimitedController. When all this is complete, RateLimitedController.cs should look like the following:

namespace RateLimitingMiddleware.Controllers
{
    [ApiController]
    [Route("api/[controller]")]
    public class RateLimitedController : ControllerBase
    {
    }
}

Create Configuration Object

Now it's time to dig into the logic behind this middleware. The first thing we ought to do is decide the shape of the configuration that will drive our middleware. We'll expect configuration objects of the following form in our application configuration:

{
  "RedisRateLimits": [
    {
      "Path": "/api/ratelimited/limited",
      "Window": "30s",
      "MaxRequests": 5
    },
    {
      "PathRegex": "/api/*",
      "Window": "1d",
      "MaxRequests": 1000
    }
  ]
}

In other words, we have four parameters.

Parameter Name | Description
Path | Literal path to be rate limited; if the path matches completely, it will trigger a rate limit check
PathRegex | Path regex to be rate limited; if the path matches, it will trigger a rate limit check
Window | The sliding window to rate limit on; should match the pattern `([0-9]+(s|m|h|d))`
MaxRequests | The maximum number of requests allowable over the period

And those parameters are going to be stored under the configuration node RedisRateLimits in our configuration.

Build Config Object

The configuration objects we'll use for this will contain the logic of the rule, plus some parsing logic to extract the window length from the window pattern. So we'll create a new class called RateLimitRule. In this class, we'll add a regex to do the pattern matching for our window:

public class RateLimitRule
{

}

Time Regex

private static readonly Regex TimePattern = new ("([0-9]+(s|m|d|h))");

Time Unit Enum

Then we'll create an enum in which we'll store the unit half of the window size:

private enum TimeUnit
{
    s = 1,
    m = 60,
    h = 3600,
    d = 86400
}

Parse Time

We are going to measure time windows in seconds (as that is the most natural unit for Redis), so we now need a method to convert a time window string to seconds:

private static int ParseTime(string timeStr)
{
    var match = TimePattern.Match(timeStr);
    if (string.IsNullOrEmpty(match.Value))
        throw new ArgumentException("Rate limit window was not provided or was not " +
                                    "properly formatted, must be of the form ([0-9]+(s|m|d|h))");
    var unit = Enum.Parse<TimeUnit>(match.Value.Last().ToString());
    var num = int.Parse(match.Value.Substring(0, match.Value.Length - 1));
    return num * (int)unit;
}
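To make the window-to-seconds conversion concrete (ParseTime is private, so these are shown as comments for intuition only):

// ParseTime("30s") -> 30
// ParseTime("1h")  -> 3600
// ParseTime("1d")  -> 86400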

Add properties

Next, we'll add the properties of this class. So that we don't repeat the window computation, we'll cache _windowSeconds in a separate internal field:

public string Path { get; set; }
public string PathRegex { get; set; }
public string Window { get; set; }
public int MaxRequests { get; set; }

internal int _windowSeconds = 0;

// Use the literal Path if one is set, otherwise fall back to the regex
internal string PathKey => string.IsNullOrEmpty(Path) ? PathRegex : Path;

internal int WindowSeconds
{
    get
    {
        if (_windowSeconds < 1)
        {
            _windowSeconds = ParseTime(Window);
        }
        return _windowSeconds;
    }
}

Match Path

Finally, we'll perform the pattern matching against the path:

public bool MatchPath(string path)
{
    if (!string.IsNullOrEmpty(Path))
    {
        return path.Equals(Path, StringComparison.InvariantCultureIgnoreCase);
    }

    if (!string.IsNullOrEmpty(PathRegex))
    {
        return Regex.IsMatch(path, PathRegex);
    }

    return false;
}
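A quick sanity check of the two matching modes (the rule values here are hypothetical):

var literal = new RateLimitRule { Path = "/api/ratelimited/limited" };
var byRegex = new RateLimitRule { PathRegex = "^/api/*" };

literal.MatchPath("/API/RateLimited/LIMITED"); // true - literal matching is case-insensitive
byRegex.MatchPath("/api/anything/else");       // true - the path matches the regex
byRegex.MatchPath("/health");                  // false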

Writing our Lua Script

We need to write a Lua script that considers all the rules applicable to a particular user on a specific endpoint. We'll use sorted sets to check the rate limits for each rule and user. On each request, the script will:

  1. Check the current time
  2. For each applicable rule: trim off entries that fall outside the rule's window, then check whether one more request would violate the rule; if any rule would be violated, return 1
  3. Otherwise, for each applicable rule, add a new entry to its sorted set with a score of the current time in seconds and a member name of the current time in microseconds
  4. Return 0

As we have an undetermined number of rules (and therefore keys and arguments) ahead of time, it's impossible to use the StackExchange.Redis library's named-argument script support that we used earlier, but we can still accomplish this with a plain Lua script.

local current_time = redis.call('TIME')
local num_windows = ARGV[1]

for i = 2, num_windows * 2, 2 do
    local window = ARGV[i]
    local max_requests = ARGV[i + 1]
    local curr_key = KEYS[i / 2]
    local trim_time = tonumber(current_time[1]) - window
    redis.call('ZREMRANGEBYSCORE', curr_key, 0, trim_time)
    local request_count = redis.call('ZCARD', curr_key)
    if request_count >= tonumber(max_requests) then
        return 1
    end
end

for i = 2, num_windows * 2, 2 do
    local curr_key = KEYS[i / 2]
    local window = ARGV[i]
    redis.call('ZADD', curr_key, current_time[1], current_time[1] .. current_time[2])
    redis.call('EXPIRE', curr_key, window)
end

return 0

The above script has an undetermined number of arguments and an undetermined number of keys ahead of time. As such, it's essential to make sure that all the keys are on the same shard, so when we build the keys, which will be of the form path_pattern:apiKey:window_size_seconds, we will surround the common part of the key apiKey with braces {apiKey}.

Build The Middleware

Now it's time to actually build the middleware. Add a new file, SlidingWindowRateLimiter.cs. Inside that file, add two classes: SlidingWindowRateLimiter and SlidingWindowRateLimiterExtensions.

In the SlidingWindowRateLimiterExtensions class, add one method that adds the SlidingWindowRateLimiter to the middleware pipeline; the class will look like this when completed:

public static class SlidingWindowRateLimiterExtensions
{
    public static void UseSlidingWindowRateLimiter(this IApplicationBuilder builder)
    {
        builder.UseMiddleware<SlidingWindowRateLimiter>();
    }
}

In the SlidingWindowRateLimiter class, start by adding the script mentioned above as a const string for the class:

private const string SlidingRateLimiter = @"
    local current_time = redis.call('TIME')
    local num_windows = ARGV[1]
    for i = 2, num_windows * 2, 2 do
        local window = ARGV[i]
        local max_requests = ARGV[i + 1]
        local curr_key = KEYS[i / 2]
        local trim_time = tonumber(current_time[1]) - window
        redis.call('ZREMRANGEBYSCORE', curr_key, 0, trim_time)
        local request_count = redis.call('ZCARD', curr_key)
        if request_count >= tonumber(max_requests) then
            return 1
        end
    end
    for i = 2, num_windows * 2, 2 do
        local curr_key = KEYS[i / 2]
        local window = ARGV[i]
        redis.call('ZADD', curr_key, current_time[1], current_time[1] .. current_time[2])
        redis.call('EXPIRE', curr_key, window)
    end
    return 0";

Constructor

We need to seed this class with an IDatabase to access Redis, an IConfiguration to access the configuration, and, of course, the next delegate in the pipeline to continue the chain. Consequently, we'll dependency-inject all of this into our middleware:

private readonly IDatabase _db;
private readonly IConfiguration _config;
private readonly RequestDelegate _next;

public SlidingWindowRateLimiter(RequestDelegate next, IConnectionMultiplexer muxer, IConfiguration config)
{
    _db = muxer.GetDatabase();
    _config = config;
    _next = next;
}

Extract Api Key

In this case, we will use Basic auth, so we will use the username from the Basic auth header as our apiKey. Consequently, we need a method to extract it:

private static string GetApiKey(HttpContext context)
{
    var encoded = string.Empty;
    var auth = context.Request.Headers["Authorization"];
    if (!string.IsNullOrEmpty(auth)) encoded = AuthenticationHeaderValue.Parse(auth).Parameter;
    if (string.IsNullOrEmpty(encoded)) return encoded;
    return Encoding.UTF8.GetString(Convert.FromBase64String(encoded)).Split(':')[0];
}

Extract Applicable Rules

From the configuration structure we defined before, we will pull out the RedisRateLimits section and bind it to an array of RateLimitRule objects. We then need to pull out the rules that apply to the current path and group them by window length and by the path key component relevant to them. If we have identical path keys, e.g., two instances of ^/api/*, we'll take the more restrictive one (the one with the fewest allowable requests). We can pull these out with a LINQ query:

public IEnumerable<RateLimitRule> GetApplicableRules(HttpContext context)
{
    var limits = _config.GetSection("RedisRateLimits").Get<RateLimitRule[]>();
    var applicableRules = limits
        .Where(x => x.MatchPath(context.Request.Path))
        .OrderBy(x => x.MaxRequests)
        .GroupBy(x => new { x.PathKey, x.WindowSeconds })
        .Select(x => x.First());
    return applicableRules;
}

Check Limitation

Our next step is to check whether the key is currently under a limitation. Our script expects an array of Redis keys of the pattern mentioned above, path_pattern:{apiKey}:window_size_seconds (for example, the rule ^/api/* with a one-hour window and apiKey foobar produces the key ^/api/*:{foobar}:3600). It then needs the number of rules being enforced and, finally, the rules appended in window_size num_requests order. With the arguments generated for the script, all we need to do is evaluate it and check whether it returns 1:

private async Task<bool> IsLimited(IEnumerable<RateLimitRule> rules, string apiKey)
{
    var keys = rules.Select(x => new RedisKey($"{x.PathKey}:{{{apiKey}}}:{x.WindowSeconds}")).ToArray();
    var args = new List<RedisValue> { rules.Count() };
    foreach (var rule in rules)
    {
        args.Add(rule.WindowSeconds);
        args.Add(rule.MaxRequests);
    }
    return (int)await _db.ScriptEvaluateAsync(SlidingRateLimiter, keys, args.ToArray()) == 1;
}

Block or Allow

Finally, in the InvokeAsync method for our middleware, we will glue all this together. First, we'll parse out the apiKey. If the apiKey isn't present, we'll return a 401. Otherwise, we will perform the rate-limiting checks and either throttle or proceed as appropriate.

public async Task InvokeAsync(HttpContext httpContext)
{
    var apiKey = GetApiKey(httpContext);
    if (string.IsNullOrEmpty(apiKey))
    {
        httpContext.Response.StatusCode = 401;
        return;
    }

    var applicableRules = GetApplicableRules(httpContext);
    var limited = await IsLimited(applicableRules, apiKey);
    if (limited)
    {
        httpContext.Response.StatusCode = 429;
        return;
    }

    await _next(httpContext);
}

Build Controller

Under the Controllers Folder, add a class named RateLimitedController. Then, in this controller, declare a new ApiController.

[ApiController]
[Route("api/[controller]")]
public class RateLimitedController : ControllerBase
{
}

In this class, add two new routes: limited and indirectly-limited.

[HttpGet]
[HttpPost]
[Route("limited")]
public async Task<IActionResult> Limited()
{
    return new JsonResult(new { Limited = false });
}

[HttpGet]
[HttpPost]
[Route("indirectly-limited")]
public async Task<IActionResult> IndirectlyLimited()
{
    return new JsonResult(new { NeverLimited = true });
}

Add Middleware to App

Open up Startup.cs.

In the ConfigureServices method, add the following line:

services.AddSingleton<IConnectionMultiplexer>(ConnectionMultiplexer.Connect("localhost"));

In the Configure method, add the following line:

app.UseSlidingWindowRateLimiter();

Configure the App

In appsettings.json, or appsettings.Development.json, add a configuration item for the rate limits:

"RedisRateLimits":[
  {
    "Path": "/api/RateLimited/limited",
    "Window": "30s",
    "MaxRequests": 5
  },
  {
    "PathRegex": "^/api/*",
    "Window": "1h",
    "MaxRequests": 50
  }
]

Test it Out

All that's left is to test it out. If you go to your terminal and run dotnet run, you can try out each of the two endpoints, which are available at:

http://localhost:5000/api/ratelimited/limited and http://localhost:5000/api/ratelimited/indirectly-limited

You can hit these endpoints repeatedly using:

for n in {1..7}; do echo $(curl -s -w " HTTP %{http_code}, %{time_total} s" -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/ratelimited/limited); sleep 0.5; done

This sends seven requests, two of which will be rejected (the limited route allows five requests per 30 seconds). After that, if you run:

for n in {1..47}; do echo $(curl -s -w " HTTP %{http_code}, %{time_total} s" -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/ratelimited/indirectly-limited); sleep 0.5; done

it should reject another two as throttled: the five successful requests from the first batch already count toward the 50-request hourly budget shared by all /api/* routes, leaving 45 of the 47 to succeed.

Resources

  • The source code for this tutorial is located on GitHub
- + \ No newline at end of file diff --git a/develop/dotnet/aspnetcore/rate-limiting/sliding-window/index.html b/develop/dotnet/aspnetcore/rate-limiting/sliding-window/index.html index a34353ac85..44c573fd91 100644 --- a/develop/dotnet/aspnetcore/rate-limiting/sliding-window/index.html +++ b/develop/dotnet/aspnetcore/rate-limiting/sliding-window/index.html @@ -4,7 +4,7 @@ How to implement Sliding Window Rate Limiting app using ASP.NET Core & Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to implement Sliding Window Rate Limiting app using ASP.NET Core & Redis

In this tutorial, we'll learn how to build a sliding window rate limiter for ASP.NET Core using Redis.

What is A Sliding Window Rate Limiter

The pattern we are implementing here is a sliding window rate limiter. Unlike a fixed window rate limiter, which groups requests into buckets based on definite time boundaries, a sliding window restricts requests over a discrete window of time prior to the current request under evaluation. For example, with a 10 req/minute limiter on a fixed window, you could encounter a case where the rate limiter allows 20 requests inside of a minute: if the first 10 requests land on the left-hand side of a window boundary and the next 10 land on the right-hand side, both buckets have enough space to let them all through. If you sent those same 20 requests through a sliding window rate limiter, and they were all sent within 60 seconds of each other, only 10 would make it through. Using sorted sets and Lua scripts, implementing one of these rate limiters is a breeze.

Prerequisites

Startup Redis

Before we begin, start up Redis. For this example, we'll use the Redis Docker image:

docker run -p 6379:6379 redis

Create Project

In your terminal, navigate to where you want the app to live and run:

dotnet new webapi -n SlidingWindowRateLimiter --no-https

Cd into the SlidingWindowRateLimiter folder and run the command dotnet add package StackExchange.Redis.

Open SlidingWindowRateLimiter.csproj in Rider or Visual Studio, or open the folder in VS Code. In the Controllers folder, add an API controller called RateLimitedController. When all this is complete, RateLimitedController.cs should look like the following:

namespace SlidingWindowRateLimiter.Controllers
{
    [ApiController]
    [Route("api/[controller]")]
    public class RateLimitedController : ControllerBase
    {
    }
}

Initialize The Multiplexer

To use Redis, we're going to initialize an instance of the ConnectionMultiplexer from StackExchange.Redis. To do so, go to the ConfigureServices method inside Startup.cs and add the following line:

services.AddSingleton<IConnectionMultiplexer>(ConnectionMultiplexer.Connect("localhost"));
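If your project uses the minimal hosting model rather than Startup.cs (the default in .NET 6+ templates; adjust to your project layout), the equivalent registration lives in Program.cs. A minimal sketch, assuming an otherwise default template:

using StackExchange.Redis;

var builder = WebApplication.CreateBuilder(args);

// Register a single multiplexer for the lifetime of the application
builder.Services.AddSingleton<IConnectionMultiplexer>(ConnectionMultiplexer.Connect("localhost"));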

Inject the ConnectionMultiplexer

In RateLimitedController.cs inject the ConnectionMultiplexer into the controller and pull out an IDatabase object from it with the following:

private readonly IDatabase _db;

public RateLimitedController(IConnectionMultiplexer mux)
{
    // Pull an IDatabase handle off the injected multiplexer
    _db = mux.GetDatabase();
}

Add a Simple Route

We will add a simple route that we will Rate Limit; it will be a POST request route on our controller. This POST request will use Basic auth - this means that each request is going to expect a header of the form Authorization: Basic <base64encoded> where the base64encoded will be a string of the form apiKey:apiSecret base64 encoded, e.g. Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==. This route will parse the key out of the header and return an OK result.

[HttpPost]
[HttpGet]
[Route("sliding")]
public async Task<IActionResult> Sliding([FromHeader] string authorization)
{
    var encoded = string.Empty;
    if (!string.IsNullOrEmpty(authorization)) encoded = AuthenticationHeaderValue.Parse(authorization).Parameter;
    if (string.IsNullOrEmpty(encoded)) return new UnauthorizedResult();
    var apiKey = Encoding.UTF8.GetString(Convert.FromBase64String(encoded)).Split(':')[0];
    return Ok();
}
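For the snippet above to compile, the top of RateLimitedController.cs needs a few framework namespaces beyond what the template generates; assuming the code shown here, the using block would include at least:

using System;
using System.Net.Http.Headers; // AuthenticationHeaderValue
using System.Text;             // Encoding
using System.Threading.Tasks;  // Task<IActionResult>
using Microsoft.AspNetCore.Mvc;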

With that setup, run the project with dotnet run. If you issue a POST request to http://localhost:5000/api/RateLimited/sliding with apiKey foobar and password password, you will get a 200 OK response back.

You can use this cURL request to elicit that response:

curl -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/RateLimited/sliding

Sliding Window Rate Limiter Lua Script

To implement this pattern we will need to do the following:

  1. The client will create a key for the server to check; this key will be of the format route:apikey
  2. That key will map to a sorted set in Redis; we will check the current time and shave off any requests in the sorted set that fall outside of our window
  3. We will then check the cardinality of the sorted set
  4. If the cardinality is less than our limit, we will
    1. Add a new member to our sorted set with a score of the current time in seconds, and a member of the current time in microseconds
    2. Set the expiration for our sorted set to the window length
    3. Return 0
  5. If the cardinality is greater than or equal to our limit, we will return 1

The trick here is that everything needs to happen atomically: we want to be able to trim the set, check its cardinality, add an item to it, and set its expiration, all without anything changing in the interim. Fortunately, this is a perfect place to use a Lua script. Specifically, we are going to be using the StackExchange.Redis script preparation engine to drive our Lua script, meaning we can use @variable_name in place of a particular position in ARGV or KEYS in the script. Our Lua script will be:

local current_time = redis.call('TIME')
local trim_time = tonumber(current_time[1]) - @window
redis.call('ZREMRANGEBYSCORE', @key, 0, trim_time)
local request_count = redis.call('ZCARD',@key)

if request_count < tonumber(@max_requests) then
redis.call('ZADD', @key, current_time[1], current_time[1] .. current_time[2])
redis.call('EXPIRE', @key, @window)
return 0
end
return 1

In order to use that in our app, we will create a new static class called Scripts which will hold the text of the script, and prepare the script to run with StackExchange.Redis. Create a new file called Scripts.cs and add the following to it.

using StackExchange.Redis;

namespace SlidingWindowRateLimiter
{
    public static class Scripts
    {
        // Prepared script: StackExchange.Redis substitutes @key, @window, and @max_requests
        public static LuaScript SlidingRateLimiterScript => LuaScript.Prepare(SlidingRateLimiter);

        private const string SlidingRateLimiter = @"
            local current_time = redis.call('TIME')
            local trim_time = tonumber(current_time[1]) - @window
            redis.call('ZREMRANGEBYSCORE', @key, 0, trim_time)
            local request_count = redis.call('ZCARD', @key)

            if request_count < tonumber(@max_requests) then
                redis.call('ZADD', @key, current_time[1], current_time[1] .. current_time[2])
                redis.call('EXPIRE', @key, @window)
                return 0
            end
            return 1
            ";
    }
}

Update the Controller for rate limiting

Back in our RateLimitedController Sliding method, we will add a few lines of code to check whether we should throttle the API request. Replace the return statement with the following:

var limited = ((int) await _db.ScriptEvaluateAsync(Scripts.SlidingRateLimiterScript,
new {key = new RedisKey($"{Request.Path}:{apiKey}"), window = 30, max_requests = 10})) == 1;
return limited ? new StatusCodeResult(429) : Ok();

The whole method should look like this now:

[HttpPost]
[HttpGet]
[Route("sliding")]
public async Task<IActionResult> Sliding([FromHeader] string authorization)
{
    var encoded = string.Empty;
    if (!string.IsNullOrEmpty(authorization)) encoded = AuthenticationHeaderValue.Parse(authorization).Parameter;
    if (string.IsNullOrEmpty(encoded)) return new UnauthorizedResult();
    var apiKey = Encoding.UTF8.GetString(Convert.FromBase64String(encoded)).Split(':')[0];
    var limited = ((int) await _db.ScriptEvaluateAsync(Scripts.SlidingRateLimiterScript,
        new {key = new RedisKey($"{Request.Path}:{apiKey}"), window = 30, max_requests = 10})) == 1;
    return limited ? new StatusCodeResult(429) : Ok();
}

Now, if we start our server back up with dotnet run and try running the following command:

for n in {1..20}; do echo $(curl -s -w " HTTP %{http_code}, %{time_total} s" -X POST -H "Content-Length: 0" --user "foobar:password" http://localhost:5000/api/ratelimited/sliding); sleep 0.5; done

You will see ten of your requests return a 200, and ten will return a 429. If you wait a bit and run the above command again, you may see behavior where every other request goes through. That's because the window slides every second, and only the requests from the previous 30 seconds are considered when determining whether to throttle a request. The first time, the above command will produce output something like this:

HTTP 200, 0.081806 s
HTTP 200, 0.003170 s
HTTP 200, 0.002217 s
HTTP 200, 0.001632 s
HTTP 200, 0.001508 s
HTTP 200, 0.001928 s
HTTP 200, 0.001647 s
HTTP 200, 0.001656 s
HTTP 200, 0.001699 s
HTTP 200, 0.001667 s
{"status":429,"traceId":"00-4af32d651483394292e35258d94ec4be-6c174cc42ca1164c-00"} HTTP 429, 0.012612 s
{"status":429,"traceId":"00-7b24da2422f5b144a1345769e210b78a-75cc1deb1f260f46-00"} HTTP 429, 0.001688 s
{"status":429,"traceId":"00-0462c9d489ce4740860ae4798e6c4869-2382f37f7e112741-00"} HTTP 429, 0.001578 s
{"status":429,"traceId":"00-127f5493caf8e044a9f29757fbf91f0a-62187f6cf2833640-00"} HTTP 429, 0.001722 s
{"status":429,"traceId":"00-89a4c2f7e2021a4d90264f9d040d250c-34443a5fdb2cff4f-00"} HTTP 429, 0.001718 s
{"status":429,"traceId":"00-f1505b800f30da4b993bebb89f902401-dfbadcb1bc3b8e45-00"} HTTP 429, 0.001663 s
{"status":429,"traceId":"00-621cf2b2f32c184fb08d0d483788897d-1c01af67cf88d440-00"} HTTP 429, 0.001601 s
{"status":429,"traceId":"00-e310ba5214d7874dbd653a8565f38df4-216f1a4b8c4b574a-00"} HTTP 429, 0.001456 s
{"status":429,"traceId":"00-52a7074239a5e84c9ded96166c0ef042-4dfedf1d60e3fd46-00"} HTTP 429, 0.001550 s
{"status":429,"traceId":"00-5e03e785895f2f459c85ade852664703-c9ad961397284643-00"} HTTP 429, 0.001535 s
{"status":429,"traceId":"00-ba2ac0f8fd902947a4789786b0f683a8-be89b14fa88d954c-00"} HTTP 429, 0.001451 s

Resources

  • You can find the code used for this tutorial in GitHub

.NET and Redis


Author:
Steve Lorello, Senior Field Engineer at Redis

Getting Started

The .NET community has built many client libraries to help handle requests to Redis. In this guide, we'll mostly be concerned with using the StackExchange.Redis client library. As the name implies, the StackExchange client is developed by Stack Exchange for use on popular websites like Stack Overflow.

Step 1. Install the Package

There are a few ways to Install the Package:

Run the following in the directory of the csproj file you want to add the package to:

  dotnet add package StackExchange.Redis

Step 2. Import the Required Namespace

using StackExchange.Redis;

Step 3. Initialize the ConnectionMultiplexer

The ConnectionMultiplexer is the main arbiter of the connection to Redis inside the CLR; your application should maintain a single instance of the ConnectionMultiplexer throughout its runtime. You can initialize the Multiplexer with either a connection string or a ConfigurationOptions object. A typical connection string is of the form HOST_NAME:PORT_NUMBER,password=PASSWORD, where HOST_NAME is the host name of your server (e.g. localhost), PORT_NUMBER is the port Redis is listening on (e.g. 6379), and PASSWORD is your Redis server's password (e.g. secret_password).

static readonly ConnectionMultiplexer _redis = ConnectionMultiplexer.Connect($"{HOST_NAME}:{PORT_NUMBER},password={PASSWORD}");
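The ConfigurationOptions route mentioned above looks something like the following sketch, using the same placeholder host, port, and password as the connection string example:

var options = new ConfigurationOptions
{
    EndPoints = { "localhost:6379" }, // HOST_NAME:PORT_NUMBER
    Password = "secret_password"      // your Redis server's password
};
var redis = ConnectionMultiplexer.Connect(options);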

Step 4. Grab Database Connection

Once we have a handle to the Multiplexer, we need to get a connection to the database.

var db = _redis.GetDatabase();

Step 5. Use the connection

Now that you've retrieved the connection to the database, all that's left is to use it. Here's a simple operation to start with:

db.Ping();
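Most other operations flow through the same IDatabase handle. As a quick sketch, the key and value here are just placeholders:

// Store and retrieve a simple string value
db.StringSet("greeting", "Hello from StackExchange.Redis");
var greeting = db.StringGet("greeting");
Console.WriteLine(greeting); // Hello from StackExchange.Redis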

Redis Launchpad

Redis Launchpad is like an “App Store” for Redis sample apps. You can easily find apps for your preferred frameworks and languages. Check out a few of these apps below, or click here to access the complete list.

Rate Limiting App in .NET

Launchpad

Leaderboard App in .NET

Launchpad

API Caching .NET

Launchpad

Basic Chat App .NET

Launchpad

Additional Resources


Add and Retrieve Objects

The Redis OM library supports declarative storage and retrieval of objects from Redis. Without Redis Stack, this is limited to storing objects as hashes and looking them up by id. You will still use the Document attribute to decorate a class you'd like to store in Redis. From there, all you need to do is call Insert or InsertAsync on the RedisCollection, or Set or SetAsync on the RedisConnection, passing in the object you want to set in Redis. You can then retrieve those objects with Get<T> or GetAsync<T> on the RedisConnection, or with FindById or FindByIdAsync on the RedisCollection.

using System;
using System.Threading.Tasks;
using Redis.OM;
using Redis.OM.Modeling;

public class Program
{
[Document(Prefixes = new []{"Employee"})]
public class Employee
{
[RedisIdField]
public string Id{ get; set; }

public string Name { get; set; }

public int Age { get; set; }

public double Sales { get; set; }

public string Department { get; set; }
}

static async Task Main(string[] args)
{
var provider = new RedisConnectionProvider("redis://localhost:6379");
var connection = provider.Connection;
var employees = provider.RedisCollection<Employee>();
var employee1 = new Employee{Name="Bob", Age=32, Sales = 100000, Department="Partner Sales"};
var employee2 = new Employee{Name="Alice", Age=45, Sales = 200000, Department="EMEA Sales"};
var idp1 = await connection.SetAsync(employee1);
var idp2 = await employees.InsertAsync(employee2);

var reconstitutedE1 = await connection.GetAsync<Employee>(idp1);
var reconstitutedE2 = await employees.FindByIdAsync(idp2);
Console.WriteLine($"First Employee's name is {reconstitutedE1.Name}, they are {reconstitutedE1.Age} years old, " +
$"they work in the {reconstitutedE1.Department} department and have sold {reconstitutedE1.Sales}, " +
$"their ID is: {reconstitutedE1.Id}");
Console.WriteLine($"Second Employee's name is {reconstitutedE2.Name}, they are {reconstitutedE2.Age} years old, " +
$"they work in the {reconstitutedE2.Department} department and have sold {reconstitutedE2.Sales}, " +
$"their ID is: {reconstitutedE2.Id}");
}
}

The code above declares an Employee class and allows you to add employees to Redis and then retrieve them. The output from this method will look like this:

First Employee's name is Bob, they are 32 years old, they work in the Partner Sales department and have sold 100000, their ID is: 01FHDFE115DKRWZW0XNF17V2RK
Second Employee's name is Alice, they are 45 years old, they work in the EMEA Sales department and have sold 200000, their ID is: 01FHDFE11T23K6FCJQNHVEF92F

If you wanted to find them in Redis directly, you could run HGETALL Employee:01FHDFE115DKRWZW0XNF17V2RK, and that would retrieve the Employee object as a hash from Redis. If you do not specify a prefix, the prefix will be the fully-qualified class name.
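As a rough illustration of what that looks like from redis-cli (the exact field layout below is illustrative rather than authoritative):

127.0.0.1:6379> HGETALL Employee:01FHDFE115DKRWZW0XNF17V2RK
 1) "Id"
 2) "01FHDFE115DKRWZW0XNF17V2RK"
 3) "Name"
 4) "Bob"
 5) "Age"
 6) "32"
 7) "Sales"
 8) "100000"
 9) "Department"
10) "Partner Sales"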


Apply Functions

Apply functions are functions that you can define as expressions to apply to your data in Redis. In essence, they allow you to combine your data and extract the information you want.

Data Model

For the remainder of this article we will be using this data model:

[Document]
public class Employee
{
[Indexed(Aggregatable = true)]
public string Name { get; set; }

[Indexed]
public GeoLoc? HomeLoc { get; set; }

[Indexed(Aggregatable = true)]
public int Age { get; set; }

[Indexed(Aggregatable = true)]
public double Sales { get; set; }

[Indexed(Aggregatable = true)]
public double SalesAdjustment { get; set; }

[Searchable(Aggregatable = true)]
public string Department { get; set; }

[Indexed(Aggregatable = true)]
public long LastOnline { get; set; } = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
}

Anatomy of an Apply Function

Apply is a method on the RedisAggregationSet<T> class which takes two arguments, each of which is a component of the apply function.

First, it takes the expression that you want Redis to execute on every record in the pipeline. This expression takes a single parameter, an AggregationResult<T>, where T is the generic type of your RedisAggregationSet. The AggregationResult has two things we should think about: first, it contains a RecordShell, which is a placeholder for the generic type; second, it has an Aggregations property, a dictionary containing the results from your pipeline. Both of these can be used in apply functions.

The second component is the alias: the name under which the result of the function is stored when the pipeline executes.

Adjusted Sales

Our data model has two properties related to sales: Sales, how much the employee has sold, and SalesAdjustment, a figure used to adjust sales based on various factors, perhaps territory covered, experience, and so on. The idea is that a fair way to analyze an employee's performance might be a combination of these two fields rather than each individually. So let's say we wanted to find everyone's adjusted sales; we could do that by creating an apply function to calculate it.

var adjustedSales = employeeAggregations.Apply(x => x.RecordShell.SalesAdjustment * x.RecordShell.Sales,
"ADJUSTED_SALES");
foreach (var result in adjustedSales)
{
Console.WriteLine($"Adjusted Sales were: {result["ADJUSTED_SALES"]}");
}

Arithmetic Apply Functions

Functions that use arithmetic and math can use the mathematical operators + for addition, - for subtraction, * for multiplication, / for division, and % for modular division; also, the ^ operator, which is typically used for bitwise exclusive-or operations, has been reserved for power functions. Additionally, you can use many System.Math library operations within Apply functions, and those will be translated to the appropriate methods for use by Redis.

Available Math Functions

Function | Type | Description | Example
Log10 | Math | yields the base 10 log of the provided number | Math.Log10(x["AdjustedSales"])
Abs | Math | yields the absolute value of the provided number | Math.Abs(x["AdjustedSales"])
Ceil | Math | yields the smallest integer not less than the provided number | Math.Ceil(x["AdjustedSales"])
Floor | Math | yields the largest integer not greater than the provided number | Math.Floor(x["AdjustedSales"])
Log | Math | yields the log base 2 of the provided number | Math.Log(x["AdjustedSales"])
Exp | Math | yields the natural exponent of the provided number (e^y) | Math.Exp(x["AdjustedSales"])
Sqrt | Math | yields the square root of the provided number | Math.Sqrt(x["AdjustedSales"])
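For instance, building on the adjusted-sales example above, a minimal sketch applying one of these math functions (the LOG_SALES alias is just an illustrative name):

var logSales = employeeAggregations.Apply(x => Math.Log10(x.RecordShell.Sales), "LOG_SALES");
foreach (var result in logSales)
{
    Console.WriteLine($"Log base 10 of sales was: {result["LOG_SALES"]}");
}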

String Functions

You can also apply multiple string functions to your data. If, for example, you wanted to create a birthday message for each employee, you could do so by calling String.Format on your records:

var birthdayMessages = employeeAggregations.Apply(x =>
string.Format("Congratulations {0} you are {1} years old!", x.RecordShell.Name, x.RecordShell.Age), "message");
await foreach (var message in birthdayMessages)
{
Console.WriteLine(message["message"].ToString());
}

List of String Functions:

Function | Type | Description | Example
ToUpper | String | converts the provided string to upper case | x.RecordShell.Name.ToUpper()
ToLower | String | converts the provided string to lower case | x.RecordShell.Name.ToLower()
StartsWith | String | Boolean expression; yields 1 if the string starts with the argument | x.RecordShell.Name.StartsWith("bob")
Contains | String | Boolean expression; yields 1 if the string contains the argument | x.RecordShell.Name.Contains("bob")
Substring | String | yields the substring starting at the given 0-based index with the length of the second argument; if the second argument is not provided, it simply returns the balance of the string | x.RecordShell.Name.Substring(4, 10)
Format | String | formats the string based off the provided pattern | string.Format("Hello {0} You are {1} years old", x.RecordShell.Name, x.RecordShell.Age)
Split | String | splits the string with the provided string; unfortunately, if you are only passing in a single splitter, because of how expressions work you'll need to provide StringSplitOptions so that no optional parameters exist when building the expression (just pass StringSplitOptions.None) | x.RecordShell.Name.Split(",", StringSplitOptions.None)

Time Functions

You can also perform functions on time data in Redis. If you have a timestamp stored in a usable format, either a unix timestamp or a timestamp string that can be parsed with strftime conventions, you can operate on it. For example, if you wanted to translate a unix timestamp to YYYY-MM-DDTHH:MM:SSZ, you can do so by calling ApplyFunctions.FormatTimestamp on the record inside of your apply function. E.g.

var lastOnline = employeeAggregations.Apply(x => ApplyFunctions.FormatTimestamp(x.RecordShell.LastOnline),
"LAST_ONLINE_STRING");

foreach (var employee in lastOnline)
{
Console.WriteLine(employee["LAST_ONLINE_STRING"].ToString());
}

Time Functions Available

Function | Type | Description | Example
ApplyFunctions.FormatTimestamp | time | transforms a unix timestamp to a formatted time string based off strftime conventions | ApplyFunctions.FormatTimestamp(x.RecordShell.LastTimeOnline)
ApplyFunctions.ParseTime | time | parses the provided formatted timestamp to a unix timestamp | ApplyFunctions.ParseTime(x.RecordShell.TimeString, "%FT%ZT")
ApplyFunctions.Day | time | rounds a unix timestamp to the beginning of the day | ApplyFunctions.Day(x.RecordShell.LastTimeOnline)
ApplyFunctions.Hour | time | rounds a unix timestamp to the beginning of the current hour | ApplyFunctions.Hour(x.RecordShell.LastTimeOnline)
ApplyFunctions.Minute | time | rounds a unix timestamp to the beginning of the current minute | ApplyFunctions.Minute(x.RecordShell.LastTimeOnline)
ApplyFunctions.Month | time | rounds a unix timestamp to the beginning of the current month | ApplyFunctions.Month(x.RecordShell.LastTimeOnline)
ApplyFunctions.DayOfWeek | time | converts the unix timestamp to the day number, with Sunday being 0 | ApplyFunctions.DayOfWeek(x.RecordShell.LastTimeOnline)
ApplyFunctions.DayOfMonth | time | converts the unix timestamp to the current day of the month (1..31) | ApplyFunctions.DayOfMonth(x.RecordShell.LastTimeOnline)
ApplyFunctions.DayOfYear | time | converts the unix timestamp to the current day of the year | ApplyFunctions.DayOfYear(x.RecordShell.LastTimeOnline)
ApplyFunctions.Year | time | converts the unix timestamp to the current year | ApplyFunctions.Year(x.RecordShell.LastTimeOnline)
ApplyFunctions.MonthOfYear | time | converts the unix timestamp to the current month (0..11) | ApplyFunctions.MonthOfYear(x.RecordShell.LastTimeOnline)

Geo Distance

Another useful function is GeoDistance, which allows you to compute the distance between two points. For example, if you wanted to see how far away from the office each employee was, you could use the ApplyFunctions.GeoDistance function inside your pipeline:

var officeLoc = new GeoLoc(-122.064181, 37.377207);
var distancesFromWork =
    employeeAggregations.Apply(x => ApplyFunctions.GeoDistance(x.RecordShell.HomeLoc, officeLoc), "DistanceToWork");
await foreach (var element in distancesFromWork)
{
    Console.WriteLine(element["DistanceToWork"].ToString());
}

Grouping and Reductions

Grouping and reducing operations using aggregations can be extremely powerful.

What Is a Group

A group is simply a set of records in Redis that share a value in one or more fields, e.g.:

{
"Name":"Susan",
"Department":"Sales",
"Sales":600000
}

{
"Name":"Tom",
"Department":"Sales",
"Sales":500000
}

If grouped together by Department, these two records would form one group. When grouped by Name, they would form two groups.

Reductions

What makes groups so useful in Redis aggregations is that you can run reductions on them to aggregate items within the group. For example, you can calculate summary statistics on numeric fields, retrieve random samples, and compute distinct counts or approximate distinct counts of any aggregatable field in the set.

Using Groups and Reductions with Redis OM .NET

You can run reductions against a RedisAggregationSet either with or without a group. If you run a reduction without a group, the result of the reduction materializes immediately as the desired type. If you run a reduction against a group, the results materialize when they are enumerated.

Reductions without a Group

If you wanted to calculate a reduction on all the records indexed by Redis in the collection, you would simply call the reduction on the RedisAggregationSet

var sumSales = employeeAggregations.Sum(x=>x.RecordShell.Sales);
Console.WriteLine($"The sum of sales for all employees was {sumSales}");

Reductions with a Group

If you want to build a group to run reductions on, e.g. to calculate the average sales in a department, you would use a GroupBy predicate to specify which field or fields to group by. If you want to group by one field, the lambda function for the GroupBy yields just the field you want to group by. If you want to group by multiple fields, new up an anonymous type inline:

var oneFieldGroup = employeeAggregations.GroupBy(x=>x.RecordShell.Department);

var multiFieldGroup = employeeAggregations.GroupBy(x=>new {x.RecordShell.Department, x.RecordShell.WorkLoc});

From here, you can run reductions on your groups. To run a reduction, execute a reduction function. When the collection materializes, the AggregationResult<T> will have the reduction stored under a formatted key of the form PropertyName_COMMAND_POSTFIX; see the supported operations table below for postfixes. If you wanted to calculate the sum of the sales of all the departments, you could:

var departments = employeeAggregations.GroupBy(x=>x.RecordShell.Department).Sum(x=>x.RecordShell.Sales);
foreach(var department in departments)
{
Console.WriteLine($"The {department[nameof(Employee.Department)]} department sold {department["Sales_SUM"]}");
}

Command Name | Command Postfix | Description
Count | COUNT | the number of records meeting the query, or in the group
CountDistinct | COUNT_DISTINCT | counts the distinct occurrences of a given property in a group
CountDistinctish | COUNT_DISTINCTISH | provides an approximate count of distinct occurrences of a given property in each group; less expensive computationally, but carries a small (roughly 3%) error rate
Sum | SUM | the sum of all occurrences of the provided field in each group
Min | MIN | the minimum occurrence of the provided field in each group
Max | MAX | the maximum occurrence of the provided field in each group
Average | AVG | the arithmetic mean of all the occurrences of the provided field in a group
StandardDeviation | STDDEV | the standard deviation from the arithmetic mean of all the occurrences of the provided field in each group
Quantile | QUANTILE | the value of a record at the provided quantile for a field in each group, e.g. the median of the field would be sitting at quantile .5
Distinct | TOLIST | enumerates all the distinct values of a given field in each group
FirstValue | FIRST_VALUE | retrieves the first occurrence of a given field in each group
RandomSample | RANDOMSAMPLE{NumRecords} | a random sample of the given field in each group
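Following the same pattern as the Sum example above, here is a sketch of the Average reducer; the Sales_AVG key is assumed to follow the postfix convention in the table:

var departmentAverages = employeeAggregations
    .GroupBy(x => x.RecordShell.Department)
    .Average(x => x.RecordShell.Sales);
foreach (var department in departmentAverages)
{
    Console.WriteLine($"The {department[nameof(Employee.Department)]} department averaged {department["Sales_AVG"]}");
}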

Closing Groups

When you invoke a GroupBy, the return type changes from RedisAggregationSet<T> to GroupedAggregationSet<T>. In some instances, you may need to close a group out and use its results further down the pipeline. To do this, all you need to do is call CloseGroup on the GroupedAggregationSet; that will end the group predicates and allow you to use the results further down the pipeline.
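A sketch of what that can look like, assuming you want to keep working with each department's summed sales after the group closes (the SALES_BONUS alias and the 10% figure are purely illustrative):

var departmentBonuses = employeeAggregations
    .GroupBy(x => x.RecordShell.Department)
    .Sum(x => x.RecordShell.Sales)
    .CloseGroup()
    .Apply(x => x["Sales_SUM"] * .1, "SALES_BONUS");
foreach (var department in departmentBonuses)
{
    Console.WriteLine($"{department[nameof(Employee.Department)]} bonus pool: {department["SALES_BONUS"]}");
}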


Aggregations Intro

Aggregations are a method of grouping documents together and processing them on the server, transforming them into the data you need in your application without having to perform the computation client-side.

Anatomy of a Pipeline

Aggregations in Redis are built around an aggregation pipeline. You will start off with a RedisAggregationSet<T> of objects that you have indexed in Redis. From there you can:

  • Query to filter down the results you want
  • Apply functions to combine and transform your data
  • Group like features together
  • Run reductions on groups
  • Sort records
  • Further filter down records

Setting up for Aggregations

Redis OM .NET provides a RedisAggregationSet<T> class that will let you perform aggregations; let's start with a trivial one. First, define a model:

[Document]
public class Employee
{
[Indexed]
public string Name { get; set; }

[Indexed]
public GeoLoc? HomeLoc { get; set; }

[Indexed(Aggregatable = true)]
public int Age { get; set; }

[Indexed(Aggregatable = true)]
public double Sales { get; set; }

[Indexed(Aggregatable = true)]
public double SalesAdjustment { get; set; }

[Searchable(Aggregatable = true)]
public string Department { get; set; }
}

We'll then create the index for that model, pull a RedisAggregationSet<T> out of our provider, and seed some data into our database:

var provider = new RedisConnectionProvider("redis://localhost:6379");
await provider.Connection.CreateIndexAsync(typeof(Employee));
var employees = provider.RedisCollection<Employee>();
var employeeAggregations = provider.AggregationSet<Employee>();
var e1 = new Employee {Name = "Bob", Age = 35, Sales = 100000, SalesAdjustment = 1.5, Department = "EMEA Sales"};
var e2 = new Employee {Name = "Alice", Age = 52, Sales = 300000, SalesAdjustment = 1.02, Department = "Partner Sales"};
var e3 = new Employee {Name = "Marcus", Age = 42, Sales = 250000, SalesAdjustment = 1.1, Department = "NA Sales"};
var e4 = new Employee {Name = "Susan", Age = 27, Sales = 200000, SalesAdjustment = .95, Department = "EMEA Sales"};
var e5 = new Employee {Name = "John", Age = 38, Sales = 275000, SalesAdjustment = .9, Department = "APAC Sales"};
var e6 = new Employee {Name = "Theresa", Age = 30, Department = "EMEA Ops"};
employees.Insert(e1);
employees.Insert(e2);
employees.Insert(e3);
employees.Insert(e4);
employees.Insert(e5);
employees.Insert(e6);

The AggregationResult

The aggregations pipeline is all built around the RedisAggregationSet<T>. This set is generic, so you can provide the model that you want to build your aggregations around (an indexed type), but you will notice that the return type from queries to the RedisAggregationSet is not the generic type you passed in. Rather, it is an AggregationResult<T>, where T is the generic type you passed in. This is a really important concept: when results are returned from aggregations, they are not hydrated into an object the way they are with queries. That's because aggregations aren't meant to pull your model data out of the database; rather, they are meant to pull out aggregated results. The AggregationResult has a RecordShell field, which is ALWAYS null outside of the pipeline. It can be used to build expressions for querying objects in Redis, but when the AggregationResult lands, it will not contain a hydrated record; instead, it will contain a dictionary of aggregations built by the aggregation pipeline. This means that you can access the results of your aggregations by indexing into the AggregationResult.

Simple Aggregations

Let's try running an aggregation where we find the sum of the sales for all our employees in EMEA. The aggregation pipeline will use the RecordShell object, which is a reference to the generic type of the aggregation set. For something as simple as a group-less SUM, you will simply get back a numeric type from the aggregation.

var sumOfSalesEmea = employeeAggregations.Where(x => x.RecordShell.Department == "EMEA")
.Sum(x => x.RecordShell.Sales);
Console.WriteLine($"EMEA sold:{sumOfSalesEmea}");

The Where expression tells the aggregation pipeline which records to consider, and subsequently the SUM expression indicates which field to sum. Aggregations are a rich feature, and this only scratches the surface; these pipelines are remarkably flexible and let you do all sorts of neat operations on your data in Redis.


Creating an Index with Redis OM

To unlock some of the nicest functionality of Redis OM, e.g. running searches, matches, aggregations, reductions, and mappings, you will need to tell Redis how you want data to be stored and how you want it indexed. One of the features the Redis OM library provides is creating indices that map directly to your objects by declaring the indices as attributes on your class.

Let's start with an example class.

[Document]
public partial class Person
{
[RedisIdField]
public string Id { get; set; }

[Searchable(Sortable = true)]
public string Name { get; set; }

[Indexed(Aggregatable = true)]
public GeoLoc? Home { get; set; }

[Indexed(Aggregatable = true)]
public GeoLoc? Work { get; set; }

[Indexed(Sortable = true)]
public int? Age { get; set; }

[Indexed(Sortable = true)]
public int? DepartmentNumber { get; set; }

[Indexed(Sortable = true)]
public double? Sales { get; set; }

[Indexed(Sortable = true)]
public double? SalesAdjustment { get; set; }

[Indexed(Sortable = true)]
public long? LastTimeOnline { get; set; }

[Indexed(Aggregatable = true)]
public string Email { get; set; }
}

As shown above, you can declare a class as being indexed with the Document Attribute. In the Document attribute, you can set a few fields to help build the index:

Property Name | Description | Default | Optional
StorageType | Defines the underlying data structure used to store the object in Redis; options are HASH and JSON (note: JSON is only usable with Redis Stack) | HASH | true
IndexName | The name of the index | $"{SimpleClassName.ToLower()}-idx" | true
Prefixes | The key prefixes for Redis to build an index off of | new string[]{$"{FullyQualifiedClassName}:"} | true
Language | Language to use for full-text search indexing | null | true
LanguageField | The name of the field in which the document stores its language | null | true
Filter | The filter to use to determine whether a particular item is indexed, e.g. @Age>=18 | null | true
IdGenerationStrategy | The strategy used to generate ids for documents; if left blank, it will use a ULID generation strategy | UlidGenerationStrategy | true
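Putting a few of those properties together, a customized Document attribute might look like the following sketch (the specific values are illustrative, and StorageType.Json assumes Redis Stack is available):

[Document(StorageType = StorageType.Json, IndexName = "person-idx", Prefixes = new[] { "Person:" })]
public partial class Person
{
    // fields as above ...
}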

Field Level Declarations

Id Fields

Every class indexed by Redis must contain an Id field marked with the RedisIdField attribute.

Indexed Fields

In addition to declaring an Id Field, you can also declare indexed fields, which will let you search for values within those fields afterward. There are two types of Field level attributes.

  1. Indexed - This type of index is valid for fields that are strings, numeric types (double/int/float, etc.), or GeoLoc; the exact way the indexed field is interpreted depends on its type
  2. Searchable - This type is only valid for string fields, but it enables full-text search on the decorated fields

IndexedAttribute Properties

There are properties inside the IndexedAttribute that let you further customize how things are stored & queried.

PropertyName | Type | Description | Default | Optional
PropertyName | string | The name of the property to be indexed | The name of the property being indexed | true
Sortable | bool | Whether to index the item so it can be sorted on in queries; enables the use of OrderBy & OrderByDescending -> collection.OrderBy(x=>x.Email) | false | true
Normalize | bool | Only applicable for string fields. Determines whether the text in the field is normalized (sent to lower case) for purposes of sorting | true | true
Separator | char | Only applicable for string fields. The character used for separating tag fields; allows the application of multiple tags to the same item, e.g. article.Category = technology,parenting delineated by a , means that collection.Where(x=>x.Category == "technology") and collection.Where(x=>x.Category == "parenting") will both match the record | \| | true
CaseSensitive | bool | Only applicable for string fields. Determines whether case is considered when performing matches on tags | false | true

SearchableAttribute Properties

There are properties for the SearchableAttribute that let you further customize how the full-text search determines matches

PropertyName | Type | Description | Default | Optional
PropertyName | string | The name of the property to be indexed | The name of the indexed property | true
Sortable | bool | Whether to index the item so it can be sorted on in queries; enables the use of OrderBy & OrderByDescending -> collection.OrderBy(x=>x.Email) | false | true
NoStem | bool | Determines whether to use stemming, in other words adding the stem of the word to the index; setting this to true will stop Redis from indexing the stems of words | false | true
PhoneticMatcher | string | The phonetic matcher to use if you'd like the index to use phonetic matching (https://oss.redis.com/redisearch/Phonetic_Matching/) | null | true
Weight | double | Determines the importance of the field when checking result accuracy | 1.0 | true

Creating The Index

After declaring the index, creating it is pretty straightforward. All you have to do is call CreateIndex for the decorated type. The library will take care of serializing the provided type into a searchable index. The library does not try to be particularly clever, so if the index already exists, the creation request will be rejected, and you will have to drop and re-add the index (migrations is a feature that may be added in the future).

var connection = provider.Connection;
connection.CreateIndex(typeof(Person));

Geo Filters

A really nifty bit of indexing you can do with Redis OM is geo-indexing. To geo-index, all you need to do is mark a GeoLoc field in your model as Indexed and create the index:

[Document]
public class Restaurant
{
[Indexed]
public string Name { get; set; }

[Indexed]
public GeoLoc Location{get; set;}

[Indexed(Aggregatable = true)]
public double CostPerPerson{get;set;}
}

So let's create the index and seed some data.

// connect
var provider = new RedisConnectionProvider("redis://localhost:6379");

// get connection
var connection = provider.Connection;

// get collection
var restaurants = provider.RedisCollection<Restaurant>();

// Create index
await connection.CreateIndexAsync(typeof(Restaurant));

// seed with dummy data
var r1 = new Restaurant {Name = "Tony's Pizza & Pasta", CostPerPerson = 12.00, Location = new (-122.076751,37.369929)};
var r2 = new Restaurant {Name = "Nizi Sushi", CostPerPerson = 16.00, Location = new (-122.057360,37.371207)};
var r3 = new Restaurant {Name = "Thai Thai", CostPerPerson = 11.50, Location = new (-122.04382,37.38)};
var r4 = new Restaurant {Name = "Chipotles", CostPerPerson = 8.50, Location = new (-122.0524,37.359719 )};
restaurants.Insert(r1);
restaurants.Insert(r2);
restaurants.Insert(r3);
restaurants.Insert(r4);

Querying Based off Location

With our data seeded, we can now run geo-filters on our restaurant data. Let's say we had an office (e.g. Redis's offices in Mountain View at -122.064224,37.377266) and we wanted to find nearby restaurants; we could do so by using a GeoFilter query to find restaurants within a certain radius, say 1 mile:

var nearbyRestaurants = restaurants.GeoFilter(x => x.Location, -122.064224, 37.377266, 1, GeoLocDistanceUnit.Miles);
foreach (var restaurant in nearbyRestaurants)
{
Console.WriteLine($"{restaurant.Name} is within 1 mile of work");
}

Numeric Queries

In addition to providing capabilities for text queries, Redis OM also provides you the ability to perform numeric equality and numeric range queries. Let us assume a model of:

[Document]
public class Employee
{
[Indexed]
public string Name { get; set; }

[Indexed(Aggregatable = true)]
public int Age { get; set; }

[Indexed(Aggregatable = true)]
public double Sales { get; set; }

[Searchable(Aggregatable = true)]
public string Department { get; set; }
}

Assume that we've connected to Redis already and retrieved a RedisCollection and seeded some data as such:

var employees = provider.RedisCollection<Employee>();
var e1 = new Employee {Name = "Bob", Age = 35, Sales = 100000, Department = "EMEA Sales"};
var e2 = new Employee {Name = "Alice", Age = 52, Sales = 300000, Department = "Partner Sales"};
var e3 = new Employee {Name = "Marcus", Age = 42, Sales = 250000, Department = "NA Sales"};
var e4 = new Employee {Name = "Susan", Age = 27, Sales = 200000, Department = "EMEA Sales"};
var e5 = new Employee {Name = "John", Age = 38, Sales = 275000, Department = "APAC Sales"};
var e6 = new Employee {Name = "Theresa", Age = 30, Department = "EMEA Ops"};
employees.Insert(e1);
employees.Insert(e2);
employees.Insert(e3);
employees.Insert(e4);
employees.Insert(e5);
employees.Insert(e6);

We can now perform queries against the numeric values in our data as you would with any other collection using LINQ expressions.

var underThirty = employees.Where(x=>x.Age < 30);
var middleTierSales = employees.Where(x=>x.Sales > 100000 && x.Sales < 300000);

You can of course also pair numeric queries with Text Queries:

var emeaMidTier = employees.Where(x => x.Sales > 100000 && x.Sales < 300000 && x.Department == "EMEA");

Sorting

If an Indexed field is marked as Sortable, or Aggregatable, you can order by that field using OrderBy predicates.

var employeesBySales = employees.OrderBy(x=>x.Sales);
var employeesBySalesDescending = employees.OrderByDescending(x=>x.Sales);

Simple Text Queries

The RedisCollection provides a fluent interface for querying objects stored in Redis. This means that if you store an object in Redis with the Redis OM library and you have Redis Stack running, you can query objects stored in Redis with ease, using the LINQ syntax you're used to.

Define the Model

Let's start off by defining a model that we will be using for querying; we will use an Employee class with some basic fields we may want to query:

[Document]
public class Employee
{
[Indexed]
public string Name { get; set; }

[Indexed(Aggregatable = true)]
public int Age { get; set; }

[Indexed(Aggregatable = true)]
public double Sales { get; set; }

[Searchable(Aggregatable = true)]
public string Department { get; set; }
}

Connect to Redis

Now we will initialize a RedisConnectionProvider, and grab a handle to a RedisCollection for Employee

static async Task Main(string[] args)
{
var provider = new RedisConnectionProvider("redis://localhost:6379");
var connection = provider.Connection;
var employees = provider.RedisCollection<Employee>();
await connection.CreateIndexAsync(typeof(Employee));
}

Create our Index

Next we need an index. The CreateIndexAsync call at the end of our Main method above takes our decorated Employee type and condenses it into an index that Redis can search.

Seed some Data

Next we'll seed a few pieces of data into our database to play around with:

var e1 = new Employee {Name = "Bob", Age = 35, Sales = 100000, Department = "EMEA Sales"};
var e2 = new Employee {Name = "Alice", Age = 52, Sales = 300000, Department = "Partner Sales"};
var e3 = new Employee {Name = "Marcus", Age = 42, Sales = 250000, Department = "NA Sales"};
var e4 = new Employee {Name = "Susan", Age = 27, Sales = 200000, Department = "EMEA Sales"};
var e5 = new Employee {Name = "John", Age = 38, Sales = 275000, Department = "APAC Sales"};
var e6 = new Employee {Name = "Theresa", Age = 30, Department = "EMEA Ops"};
var insertTasks = new []
{
employees.InsertAsync(e1),
employees.InsertAsync(e2),
employees.InsertAsync(e3),
employees.InsertAsync(e4),
employees.InsertAsync(e5),
employees.InsertAsync(e6)
};
await Task.WhenAll(insertTasks);

Simple Text Query of an Indexed Field

With these data inserted into our database, we can now go ahead and begin querying. Let's start out by trying to query people by name. We can search for all employees named Susan with a simple Where predicate:

var susans = employees.Where(x => x.Name == "Susan");
await foreach (var susan in susans)
{
Console.WriteLine($"Susan is {susan.Age} years old and works in the {susan.Department} department ");
}

The Where Predicates also support and/or operators, e.g. to find all employees named Alice or Bob you can use:

var AliceOrBobs = employees.Where(x => x.Name == "Alice" || x.Name == "Bob");
await foreach (var employee in AliceOrBobs)
{
Console.WriteLine($"{employee.Name} is {employee.Age} years old and works in the {employee.Department} Department");
}

Limiting Result Object Fields

When you are querying larger documents in Redis, you may not want to drag the entire object back over the network; in that case, you can limit the results to only what you want using a Select predicate. E.g., if you only wanted to find out the ages of employees, all you would need to do is select the age of each employee:

var employeeAges = employees.Select(x => x.Age);
await foreach (var age in employeeAges)
{
Console.WriteLine($"age: {age}");
}

Or if you want to select more than one field you can create a new anonymous object:

var employeeAges = employees.Select(x => new {x.Name, x.Age});
await foreach (var e in employeeAges)
{
Console.WriteLine($"{e.Name} is age: {e.Age} years old");
}

Limiting Returned Objects

You can limit the size of your result (in the number of objects returned) with Skip & Take predicates. Skip will skip over the specified number of records, and Take will take only (at most) the number of records provided.

var people = employees.Skip(1).Take(2);
await foreach (var e in people)
{
Console.WriteLine($"{e.Name} is age: {e.Age} years old");
}

There are two types of attributes that can decorate strings: Indexed, which we've gone over, and Searchable, which we've yet to discuss. The Searchable attribute considers equality slightly differently than Indexed; it operates off a full-text search. In expressions involving Searchable fields, equality (==) means a match. A match in the context of a searchable field is not necessarily a full exact match, but rather that the string contains the search text. Let's look at some examples.

Find Employees in Sales

So we have a Department string which is marked as Searchable in our Employee class. Notice how we've named our departments: they contain a region and a department type. If we wanted to find all employees in Sales, we could do so with:

var salesPeople = employees.Where(x => x.Department == "Sales");
await foreach (var employee in salesPeople)
{
Console.WriteLine($"{employee.Name} is in the {employee.Department} department");
}

This will produce:

Bob is in the EMEA Sales department
Alice is in the Partner Sales department
Marcus is in the NA Sales department
Susan is in the EMEA Sales department
John is in the APAC Sales department

This is because they are all folks in departments called Sales.

If you wanted to search for everyone in a department in EMEA you could search with:

var emeaFolks = employees.Where(x => x.Department == "EMEA");
await foreach (var employee in emeaFolks)
{
Console.WriteLine($"{employee.Name} is in the {employee.Department} department");
}

Which of course would produce:

Bob is in the EMEA Sales department
Susan is in the EMEA Sales department
Theresa is in the EMEA Ops department

Sorting

If a Searchable or Indexed field is marked as Sortable, or Aggregatable, you can order by that field using OrderBy predicates.

var employeesByName = employees.OrderBy(x=>x.Name);
var employeesByNameDescending = employees.OrderByDescending(x=>x.Name);

How to use Redis Streams with .NET

Redis Streams are a powerful data structure that allows you to use Redis as a sort of message bus to transport messages between different application components. The way streams operate in Redis is very fast and memory efficient. This article will not go over the minutiae of every command available for Redis Streams; rather, it aims to provide a high-level tutorial for how you can use Redis Streams with .NET.

Start Redis

The first thing we'll want to do is start Redis. If you already have an instance of Redis, you can ignore this bit and adjust the connection step below to connect to your instance of Redis. Redis is straightforward to get up and running; you can do so using docker:

docker run -p 6379:6379 redis

Create your .NET app

For simplicity's sake, we'll stick to a simple console app, from which we'll spin out a few tasks that will perform the various add/read operations that we'll use. Create a new console app with the dotnet new command:

dotnet new console -n RedisStreamsBasics

Add StackExchange.Redis package

Next, we'll need to add the client library that we will use to interface with Redis. StackExchange.Redis is the canonical package, thus we will use it in this example. First cd into the RedisStreamsBasics directory and then run the dotnet add package command:

cd RedisStreamsBasics
dotnet add package StackExchange.Redis

Initialize the Multiplexer

StackExchange.Redis centers more or less around the ConnectionMultiplexer, which handles the routing and queuing of all commands that you send to Redis. So our first code-related step is to initialize the Multiplexer. Creating the Multiplexer is pretty straightforward; open up Program.cs in your IDE and add the following to it:

using StackExchange.Redis;

var tokenSource = new CancellationTokenSource();
var token = tokenSource.Token;

var muxer = ConnectionMultiplexer.Connect("localhost");
var db = muxer.GetDatabase();

const string streamName = "telemetry";
const string groupName = "avg";

We're also initializing a CancellationToken and CancellationTokenSource here. We'll set these up towards the end of this tutorial so that this application does not run endlessly. Also, we're creating a couple of constants, the stream's name and the group's name, that we'll use later, and we're grabbing an IDatabase object from the Multiplexer to use.

Create the consumer group

A Consumer Group in a Redis Stream allows you to group a bunch of consumers to pull messages off the stream for the group. This functionality is excellent when you have high-throughput workloads and you want to scale out the workers who will process your messages. To use a consumer group, you first need to create it. To create a consumer group, you'll use the StreamCreateConsumerGroupAsync method, passing in the streamName and groupName, as well as the starting id; we'll use the 0-0 id (the lowest id allowable in Redis Streams). Before invoking this call, it's wise to validate that the group doesn't exist yet, as creating an already existing consumer group will result in an error. So first, we'll check whether the stream exists; if it doesn't, or if none of the stream's groups (retrieved with the stream info method) match the avg groupName, we create the group.

if (!(await db.KeyExistsAsync(streamName)) ||
(await db.StreamGroupInfoAsync(streamName)).All(x=>x.Name!=groupName))
{
await db.StreamCreateConsumerGroupAsync(streamName, groupName, "0-0", true);
}

Spin up producer task

Three tasks will run in parallel in our program. The first is the producerTask. This task will write a random number between 50 and 65 as the temp and the current time as the time.

var producerTask = Task.Run(async () =>
{
var random = new Random();
while (!token.IsCancellationRequested)
{
await db.StreamAddAsync(streamName,
new NameValueEntry[]
{new("temp", random.Next(50, 65)), new NameValueEntry("time", DateTimeOffset.Now.ToUnixTimeSeconds())});
await Task.Delay(2000);
}
});

Parser helper function for reading results

The results retrieved from Redis will be in a reasonably readable form; all the same, it is helpful for our purposes to parse the result into a dictionary. To do this, add an inline function to handle the parsing:

Dictionary<string, string> ParseResult(StreamEntry entry) => entry.Values.ToDictionary(x => x.Name.ToString(), x => x.Value.ToString());
Note: Stream messages enforce no requirement that field names be unique. We use a dictionary for clarity's sake in this example, but you will need to ensure that you are not passing in multiple fields with the same name to prevent issues with the dictionary.

Spin up most recent element task

Next, we'll need to spin up a task to read the most recent element off of the stream. To do this, we'll use the StreamRangeAsync method, passing in two special ids: -, which means the lowest id, and +, which means the highest id. Running this command will result in some duplication. This redundancy is necessary because the StackExchange.Redis library supports neither blocking stream reads nor the special $ character for stream reads. For this tutorial, you can manage these most-recent reads with the following code:

var readTask = Task.Run(async () =>
{
while (!token.IsCancellationRequested)
{
var result = await db.StreamRangeAsync(streamName, "-", "+", 1, Order.Descending);
if (result.Any())
{
var dict = ParseResult(result.First());
Console.WriteLine($"Read result: temp {dict["temp"]} time: {dict["time"]}");
}

await Task.Delay(1000);
}
});

Spin up consumer group read Task

The final Task we'll spin up is the read task for the consumer group. Due to the nature of consumer groups, you can spin this Task up multiple times to scale out the processing as needed. It's the responsibility of Redis to keep track of which messages it has distributed to the consumer group, as well as which messages consumers have acknowledged. Acknowledging messages adds a layer of validation that all messages were processed; if something happens to one of your processing tasks or processes, you can more easily know which messages you missed.

We'll check to see if we have a recent message-id to handle all of this. If we do, we will send an acknowledgment to the server that the id was processed. Then we will grab the next message to be processed from the stream, pull out the data and the id and print out the result.

double count = default;
double total = default;

var consumerGroupReadTask = Task.Run(async () =>
{
string id = string.Empty;
while (!token.IsCancellationRequested)
{
if (!string.IsNullOrEmpty(id))
{
await db.StreamAcknowledgeAsync(streamName, groupName, id);
id = string.Empty;
}
var result = await db.StreamReadGroupAsync(streamName, groupName, "avg-1", ">", 1);
if (result.Any())
{
id = result.First().Id;
count++;
var dict = ParseResult(result.First());
total += double.Parse(dict["temp"]);
Console.WriteLine($"Group read result: temp: {dict["temp"]}, time: {dict["time"]}, current average: {total/count:00.00}");
}
await Task.Delay(1000);
}
});

Set timeout and await tasks

Finally, we need to set the timeout and await the tasks at the end of our program:

tokenSource.CancelAfter(TimeSpan.FromSeconds(20));
await Task.WhenAll(producerTask, readTask, consumerGroupReadTask);

Run the app

You can now run this app with the dotnet run command.

Resources:

  • The source for this tutorial is in GitHub
  • Redis University has an extensive course on Redis Streams where you can learn everything you need to know about them.
  • You can learn more about Redis Streams in the Streams Info article on redis.io

Getting Started with Netlify

Step 1. Preparing the local environment


Develop your application using programming languages

Find documentation, sample code and tools to develop with your favorite language.

Getting Started with Node.js and Redis
Getting Started with Ruby and Redis
Getting Started with PHP and Redis

Java and Redis

Find tutorials, examples and technical articles that will help you to develop with Redis and Java.

Getting Started

The Java community has built many client libraries that you can find here. For your first steps with Java and Redis, this article shows how to use Jedis, the supported Redis client for Java.

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT.

Run a Redis server

You can either run Redis in a Docker container or directly on your machine. Use these commands to set up a Redis server locally on macOS:

 brew tap redis-stack/redis-stack
brew install --cask redis-stack
INFO

Redis Stack unifies and simplifies the developer experience of the leading Redis modules and the capabilities they provide. Redis Stack provides the following in addition to Redis Open Source: JSON, Search, Time Series, and Probabilistic data structures.

Ensure that you are able to connect to the Redis instance using the following command:

 redis-cli
localhost>

Using Jedis

Step 1. Add the Jedis dependency to your Maven (or Gradle) project file:

<dependency>
    <groupId>redis.clients</groupId>
    <artifactId>jedis</artifactId>
    <version>5.0.2</version>
</dependency>

Step 2. Import the required classes

 import redis.clients.jedis.*;

Step 3. Create a Connection Pool

Once you have added the Jedis library to your project and imported the necessary classes you can create a connection pool.

You can find more information about Jedis connection pool in the Jedis Wiki. The connection pool is based on the Apache Common Pool 2.0 library.

  JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "localhost", 6379);

Step 4. Write your application code

Once you have access to the connection pool you can now get a Jedis instance and start to interact with your Redis instance.

// Create a Jedis connection pool
JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "localhost", 6379);

// Get a connection from the pool and use the database
try (Jedis jedis = jedisPool.getResource()) {

    jedis.set("mykey", "Hello from Jedis");
    String value = jedis.get("mykey");
    System.out.println(value);

    jedis.zadd("vehicles", 0, "car");
    jedis.zadd("vehicles", 0, "bike");
    Set<String> vehicles = jedis.zrange("vehicles", 0, -1);
    System.out.println(vehicles);

}

// Close the connection pool
jedisPool.close();

Find more information about Java & Redis connections in "Redis Connect".

Redis Launchpad

Redis Launchpad is like an “App Store” for Redis sample apps. You can easily find apps for your preferred frameworks and languages. Check out a few of these apps below, or click here to access the complete list.

Movie Database app in Java


Movie Database app in Java based on Search capabilities

Leaderboard app in Java


How to implement a leaderboard app using Redis & Java (Spring)

Ecosystem

As a developer, you can use the Java client library directly in your application, or you can use frameworks like Spring, Quarkus, Vert.x, and Micronaut.

Develop with Spring


Spring Data Redis, part of the larger Spring Data project, provides easy access to Redis from Spring applications.

Develop with Quarkus


The Redis Client extension allows you to connect your Quarkus application to a Redis instance.

Develop with Vert.x


Eclipse Vert.x is a framework for building reactive applications on the JVM. Vert.x-redis is a Redis client for use with Vert.x.

Develop with Micronaut


Micronaut is a framework for building microservices and serverless applications. The Micronaut Redis extension provides the integration with Redis.


More developer resources

Brewdis - Product Catalog (Spring) See how to use Redis and Spring to build a product catalog with streams, hashes and Search

Redis Stream in Action (Spring) See how to use Spring to create multiple producers and consumers with Redis Streams

Rate Limiting with Vert.x See how to use a Redis Sorted Set with Vert.x to build a rate limiting service.

Redis University

Redis for Java Developers

Redis for Java Developers teaches you how to build robust Redis client applications in Java using the Jedis client library. The course focuses on writing idiomatic Java applications with the Jedis API, describing language-specific patterns for managing Redis database connections, handling errors, and using standard classes from the JDK. The course material uses the Jedis API directly with no additional frameworks. As such, the course is appropriate for all Java developers, and it clearly illustrates the principles involved in writing applications with Redis.


Java and Redis

Explore the many different ways to build Java applications powered by Redis:

Java and Redis
Data-Driven Applications with Spring Boot and Redis
Getting Started with Spring Data Redis

The course includes milestones in the form of Git branches that can help you pick up the project at any specific lesson.

Prerequisites

To get the most from this course, you'll need a machine that can run the application and the Redis server, which is provided as a Docker container. You'll also need a few common developer tools installed on your machine.

Let's Learn Together

We're here to support your learning through a dedicated Discord channel that I'll be monitoring along with other teaching assistants. Join us on the Redis Discord server.

Course Contents

Create the skeleton for the course’s Spring Boot application.
Introducing Spring Data Redis.
Object Mapping & Redis Repositories.
User/Roles & Secondary Indexes.
Books, Categories & The Catalog.
Domain Models with Redis JSON.
Search with Redis Search.
Caching REST Services with Redis.
Spring and Redis: Up and Running

This speed comes from the fact that Redis stores and serves all data from RAM rather than disk. Redis is durable, so your data will be persisted, but all reads will be from a copy of the data held in RAM. This makes Redis an excellent choice for applications that require real-time data access.

External Resources

Here are some resources that we think will be useful to you as you discover Redis:

  • redis.io - the official website of open source Redis.
  • Redis Enterprise Cloud - a fully managed cloud service from Redis with a free plan for getting started.
  • The official Redis Docker image.
  • For a comprehensive introduction to Redis, we recommend taking a look at the RU101: Introduction to Redis Data Structures course at Redis University. In this free online course, you’ll learn about the data structures in Redis, and you’ll see how to practically apply them in the real world.
Introducing Spring Data Redis

We can now write strings to Redis through our REST controller. Next, let’s add a corresponding GET method to our controller to read string values:

@GetMapping("/strings/{key}")
public Map.Entry<String, String> getString(@PathVariable("key") String key) {
    String value = template.opsForValue().get(STRING_KEY_PREFIX + key);

    if (value == null) {
        throw new ResponseStatusException(HttpStatus.NOT_FOUND, "key not found");
    }

    return new SimpleEntry<String, String>(key, value);
}

With imports:

import java.util.AbstractMap.SimpleEntry;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.server.ResponseStatusException;

We can now issue a GET request to retrieve String keys:

$ curl --location --request GET 'http://localhost:8080/api/redis/strings/database:redis:creator'
{"database:redis:creator":"Salvatore Sanfilippo"}

On the Redis CLI monitor you should see:

1617347871.901585 [0 172.19.0.1:58284] "GET" "redi2read:strings:database:redis:creator"

Note that in order to return an error on a key not found, we have to check the result for null and throw an appropriate exception. The resulting error response looks like this:

{
"timestamp": "2021-04-02T07:45:10.303+00:00",
"status": 404,
"error": "Not Found",
"trace": "org.springframework.web.server.ResponseStatusException: 404...\n",
"message": "key not found",
"path": "/api/redis/strings/database:neo4j:creator"
}

Keep in mind that this is a “development time” exception, appropriate for an error page meant for developers. In production, we would likely intercept this exception and create an API-appropriate response (probably just the status and error fields above).
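
For illustration, here is a minimal sketch of such an interceptor, assuming Spring Framework 5.x (where ResponseStatusException exposes getStatus()); the ApiExceptionHandler class name is invented for this example:

import java.util.Map;

import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.bind.annotation.RestControllerAdvice;
import org.springframework.web.server.ResponseStatusException;

// Hypothetical advice class: intercepts ResponseStatusException and trims the
// response body down to just the status and error fields.
@RestControllerAdvice
public class ApiExceptionHandler {

    @ExceptionHandler(ResponseStatusException.class)
    public ResponseEntity<Map<String, Object>> handle(ResponseStatusException e) {
        Map<String, Object> body = Map.of(
            "status", e.getStatus().value(),
            "error", e.getStatus().getReasonPhrase());
        return new ResponseEntity<>(body, e.getStatus());
    }
}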

Object Mapping & Redis Repositories

On server start, we should now see the output of our database seeding exactly once.

2021-04-02 19:28:25.367  INFO 94971 --- [  restartedMain] c.r.edu.redi2read.Redi2readApplication   : Started Redi2readApplication in 2.146 seconds (JVM running for 2.544)
2021-04-02 19:28:25.654 INFO 94971 --- [ restartedMain] c.r.edu.redi2read.boot.CreateRoles : >>>> Created admin and customer roles...

Let’s use the Redis CLI to explore how the Roles were stored. We’ll use the KEYS command, passing the Role fully qualified class name and a wildcard:

127.0.0.1:6379> KEYS com.redislabs.edu.redi2read.models.Role*
1) "com.redislabs.edu.redi2read.models.Role:c4219654-0b79-4ee6-b928-cb75909c4464"
2) "com.redislabs.edu.redi2read.models.Role:9d383baf-35a0-4d20-8296-eedc4bea134a"
3) "com.redislabs.edu.redi2read.models.Role"

The first two values are Hashes, actual instances of the Role class. The string after the : is the primary key of the individual Role. Let’s inspect one of those hashes:

127.0.0.1:6379> TYPE "com.redislabs.edu.redi2read.models.Role:c4219654-0b79-4ee6-b928-cb75909c4464"
hash
127.0.0.1:6379> HGETALL "com.redislabs.edu.redi2read.models.Role:c4219654-0b79-4ee6-b928-cb75909c4464"
1) "_class"
2) "com.redislabs.edu.redi2read.models.Role"
3) "id"
4) "c4219654-0b79-4ee6-b928-cb75909c4464"
5) "name"
6) "admin"

Using the TYPE command returns, as expected, that the value under the key is a Redis Hash. We use HGETALL to “Get All” the values in the Hash. The _class field is metadata that indicates the class of the object stored in the Hash. Now let’s inspect the third value in the KEYS list:

127.0.0.1:6379> TYPE "com.redislabs.edu.redi2read.models.Role"
set
127.0.0.1:6379> SMEMBERS "com.redislabs.edu.redi2read.models.Role"
1) "9d383baf-35a0-4d20-8296-eedc4bea134a"
2) "c4219654-0b79-4ee6-b928-cb75909c4464"

The Redis Set under the mapped class name is used to keep track of the primary keys maintained for a given class.

User/Roles & Secondary Indexes

We wrap the result in a list to match the result type of the method, and we use Optional to handle a null result from the finder. And don’t forget your imports:

import java.util.Collections;
import java.util.List;
import java.util.Optional;
import org.springframework.web.bind.annotation.RequestParam;

Invoking the endpoint with curl:

curl --location --request GET 'http://localhost:8080/api/users/?email=donald.gibson@example.com'

Returns the expected result:

[
{
"id": "-1266125356844480724",
"name": "Donald Gibson",
"email": "donald.gibson@example.com",
"roles": [
{
"id": "a9f9609f-c173-4f48-a82d-ca88b0d62d0b",
"name": "customer"
}
]
}
]
Books, Categories & The Catalog

In the file src/main/resources/application.properties, add the following values:

app.numberOfRatings=5000
app.ratingStars=5

Implementing Pagination with All Books

Pagination is helpful when we have a large dataset and want to present it to the user in smaller chunks. As we learned earlier in the lesson, the BookRepository extends the PagingAndSortingRepository, which is built on top of the CrudRepository. In this section, we will refactor the BookController all method to work with the pagination features of the PagingAndSortingRepository. Replace the previously created all method with the following contents:

@GetMapping
public ResponseEntity<Map<String, Object>> all( //
        @RequestParam(defaultValue = "0") Integer page, //
        @RequestParam(defaultValue = "10") Integer size //
) {
    Pageable paging = PageRequest.of(page, size);
    Page<Book> pagedResult = bookRepository.findAll(paging);
    List<Book> books = pagedResult.hasContent() ? pagedResult.getContent() : Collections.emptyList();

    Map<String, Object> response = new HashMap<>();
    response.put("books", books);
    response.put("page", pagedResult.getNumber());
    response.put("pages", pagedResult.getTotalPages());
    response.put("total", pagedResult.getTotalElements());

    return new ResponseEntity<>(response, new HttpHeaders(), HttpStatus.OK);
}

Let’s break down the refactoring:

  • We want to control the method return value so we’ll use a ResponseEntity, which is an extension of HttpEntity and gives us control over the HTTP status code, headers, and body.
  • For the return type, we wrap a Map<String,Object> to return the collection of books as well as pagination data.
  • We’ve added two request parameters (HTTP query params) of type integer for the page number being retrieved and the size of the page. The page number defaults to 0 and the size of the page defaults to 10.
  • In the body of the method, we use the Pageable and PageRequest abstractions to construct the paging request.
  • We get a Page<Book> result by invoking the findAll method, passing the Pageable paging request.
  • If the returned page contains any items, we add them to the response object. Otherwise, we add an empty list.
  • The response is constructed by instantiating a Map and adding the books, current page, total number of pages, and total number of books.
  • Finally we package the response map into a ResponseEntity.

Let’s fire up a pagination request with curl as shown next:

curl --location --request GET 'http://localhost:8080/api/books/?size=25&page=2'

Passing a page size of 25 and requesting page number 2, we get the following:

{
"total": 2403,
"books": [
{
"id": "1786469960",
"title": "Data Visualization with D3 4.x Cookbook",
"subtitle": null,
"description": "Discover over 65 recipes to help you create breathtaking data visualizations using the latest features of D3...",
"language": "en",
"pageCount": 370,
"thumbnail": "http://books.google.com/books/content?id=DVQoDwAAQBAJ&printsec=frontcover&img=1&zoom=1&edge=curl&source=gbs_api",
"price": 22.39,
"currency": "USD",
"infoLink": "https://play.google.com/store/books/details?id=DVQoDwAAQBAJ&source=gbs_api",
"authors": [
"Nick Zhu"
],
"categories": [
{
"id": "f2ada1e2-7c18-4d90-bfe7-e321b650c0a3",
"name": "redis"
}
]
},
{
"id": "111871735X",
"title": "Android Programming",
"subtitle": "Pushing the Limits",
"description": "Unleash the power of the Android OS and build the kinds ofbrilliant, innovative apps users love to use ...",
"language": "en",
"pageCount": 432,
"thumbnail": "http://books.google.com/books/content?id=SUWPAQAAQBAJ&printsec=frontcover&img=1&zoom=1&edge=curl&source=gbs_api",
"price": 30.0,
"currency": "USD",
"infoLink": "https://play.google.com/store/books/details?id=SUWPAQAAQBAJ&source=gbs_api",
"authors": [
"Erik Hellman"
],
"categories": [
{
"id": "47d9a769-bbc2-4068-b27f-2b800bec1565",
"name": "kotlin"
}
]
},
Domain Models with Redis

We search for the cart by ID.

  • If we find the cart, we search for the index of the item to be removed in the array of cart items.
  • If we find the item, we use the JSON.ARRPOP command to remove the item by its index at the JSONPath expression “.cartItems”.
public void removeFromCart(String id, String isbn) {
    Optional<Cart> cartFinder = cartRepository.findById(id);
    if (cartFinder.isPresent()) {
        Cart cart = cartFinder.get();
        String cartKey = CartRepository.getKey(cart.getId());
        List<CartItem> cartItems = new ArrayList<CartItem>(cart.getCartItems());
        OptionalLong cartItemIndex = LongStream.range(0, cartItems.size())
            .filter(i -> cartItems.get((int) i).getIsbn().equals(isbn))
            .findFirst();
        if (cartItemIndex.isPresent()) {
            redisJson.arrPop(cartKey, CartItem.class, cartItemsPath, cartItemIndex.getAsLong());
        }
    }
}

Generating Random Carts

We now have all the pieces in place to create a CommandLineRunner that can generate random carts for our users. As done previously, we will set the number of carts generated using an application property. To do so, add the following to the file src/main/resources/application.properties:

app.numberOfCarts=2500

The CreateCarts CommandLineRunner is shown below. Add it to the boot package.

package com.redislabs.edu.redi2read.boot;

import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import java.util.stream.IntStream;

import com.redislabs.edu.redi2read.models.Book;
import com.redislabs.edu.redi2read.models.Cart;
import com.redislabs.edu.redi2read.models.CartItem;
import com.redislabs.edu.redi2read.models.User;
import com.redislabs.edu.redi2read.repositories.BookRepository;
import com.redislabs.edu.redi2read.repositories.CartRepository;
import com.redislabs.edu.redi2read.services.CartService;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.core.annotation.Order;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;

import lombok.extern.slf4j.Slf4j;

@Component
@Order(5)
@Slf4j
public class CreateCarts implements CommandLineRunner {

    @Autowired
    private RedisTemplate<String, String> redisTemplate;

    @Autowired
    CartRepository cartRepository;

    @Autowired
    BookRepository bookRepository;

    @Autowired
    CartService cartService;

    @Value("${app.numberOfCarts}")
    private Integer numberOfCarts;

    @Override
    public void run(String... args) throws Exception {
        if (cartRepository.count() == 0) {
            Random random = new Random();

            // loops for the number of carts to create
            IntStream.range(0, numberOfCarts).forEach(n -> {
                // get a random user
                String userId = redisTemplate.opsForSet()//
                        .randomMember(User.class.getName());

                // make a cart for the user
                Cart cart = Cart.builder()//
                        .userId(userId) //
                        .build();

                // get between 1 and 7 books
                Set<Book> books = getRandomBooks(bookRepository, 7);

                // add to cart
                cart.setCartItems(getCartItemsForBooks(books));

                // save the cart
                cartRepository.save(cart);

                // randomly checkout carts
                if (random.nextBoolean()) {
                    cartService.checkout(cart.getId());
                }
            });

            log.info(">>>> Created Carts...");
        }
    }

    private Set<Book> getRandomBooks(BookRepository bookRepository, int max) {
        Random random = new Random();
        int howMany = random.nextInt(max) + 1;
        Set<Book> books = new HashSet<Book>();
        IntStream.range(1, howMany).forEach(n -> {
            String randomBookId = redisTemplate.opsForSet().randomMember(Book.class.getName());
            books.add(bookRepository.findById(randomBookId).get());
        });

        return books;
    }

    private Set<CartItem> getCartItemsForBooks(Set<Book> books) {
        Set<CartItem> items = new HashSet<CartItem>();
        books.forEach(book -> {
            CartItem item = CartItem.builder()//
                    .isbn(book.getId()) //
                    .price(book.getPrice()) //
                    .quantity(1L) //
                    .build();
            items.add(item);
        });

        return items;
    }
}

Let’s break down the CreateCarts class:

  • As with other CommandLineRunners, we check that there are no carts created.
  • For each cart to be created, we
  • Retrieve a random user.
  • Create a cart for the user.
  • Retrieve between 1 and 7 books.
  • Add the cart items to the cart for the retrieved books.
  • Randomly “checkout” the cart.

There are two private utility methods at the bottom of the class to get a random number of books and to create cart items from a set of books. Upon server start (after some CPU cycles) you should see:

2021-04-04 14:58:08.737  INFO 31459 --- [  restartedMain] c.r.edu.redi2read.boot.CreateCarts       : >>>> Created Carts...

We can now use the Redis CLI to get a random cart key from the cart set, check the type of one of the keys (ReJSON-RL) and use the JSON.GET command to retrieve the JSON payload:

127.0.0.1:6379> SRANDMEMBER "com.redislabs.edu.redi2read.models.Cart"
"com.redislabs.edu.redi2read.models.Cart:dcd6a6c3-59d6-43b4-8750-553d159cdeb8"
127.0.0.1:6379> TYPE "com.redislabs.edu.redi2read.models.Cart:dcd6a6c3-59d6-43b4-8750-553d159cdeb8"
ReJSON-RL
127.0.0.1:6379> JSON.GET "com.redislabs.edu.redi2read.models.Cart:dcd6a6c3-59d6-43b4-8750-553d159cdeb8"
"{\"id\":\"dcd6a6c3-59d6-43b4-8750-553d159cdeb8\",\"userId\":\"-3356969291827598172\",\"cartItems\":[{\"isbn\":\"1784391093\",\"price\":17.190000000000001,\"quantity\":1},{\"isbn\":\"3662433524\",\"price\":59.990000000000002,\"quantity\":1}]}"

The Cart Controller

The CartController is mostly a pass-through to the CartService (as controllers are intended to be).

package com.redislabs.edu.redi2read.controllers;

import com.redislabs.edu.redi2read.models.Cart;
import com.redislabs.edu.redi2read.models.CartItem;
import com.redislabs.edu.redi2read.services.CartService;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/api/carts")
public class CartController {

    @Autowired
    private CartService cartService;

    @GetMapping("/{id}")
    public Cart get(@PathVariable("id") String id) {
        return cartService.get(id);
    }

    @PostMapping("/{id}")
    public void addToCart(@PathVariable("id") String id, @RequestBody CartItem item) {
        cartService.addToCart(id, item);
    }

    @DeleteMapping("/{id}")
    public void removeFromCart(@PathVariable("id") String id, @RequestBody String isbn) {
        cartService.removeFromCart(id, isbn);
    }

    @PostMapping("/{id}/checkout")
    public void checkout(@PathVariable("id") String id) {
        cartService.checkout(id);
    }

}

Let’s use curl to request a cart by its ID:

curl --location --request GET 'http://localhost:8080/api/carts/dcd6a6c3-59d6-43b4-8750-553d159cdeb8'

Which should return a payload like:

{
"id": "dcd6a6c3-59d6-43b4-8750-553d159cdeb8",
"userId": "-3356969291827598172",
"cartItems": [
{
"isbn": "1784391093",
"price": 17.19,
"quantity": 1
},
{
"isbn": "3662433524",
"price": 59.99,
"quantity": 1
}
],
"total": 77.18
}
Search with Redis

Unlike search indexes, which RediSearch maintains automatically, you maintain suggestion dictionaries manually using FT.SUGADD and FT.SUGDEL. Add the property for the name of the auto-complete dictionary to src/main/resources/application.properties:

app.autoCompleteKey=author-autocomplete

Add the file src/main/java/com/redislabs/edu/redi2read/boot/CreateAuthorNameSuggestions.java with the following contents:

package com.redislabs.edu.redi2read.boot;

import com.redislabs.edu.redi2read.repositories.BookRepository;
import com.redislabs.lettusearch.RediSearchCommands;
import com.redislabs.lettusearch.StatefulRediSearchConnection;
import com.redislabs.lettusearch.Suggestion;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.core.annotation.Order;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;

import lombok.extern.slf4j.Slf4j;

@Component
@Order(7)
@Slf4j
public class CreateAuthorNameSuggestions implements CommandLineRunner {

    @Autowired
    private RedisTemplate<String, String> redisTemplate;

    @Autowired
    private BookRepository bookRepository;

    @Autowired
    private StatefulRediSearchConnection<String, String> searchConnection;

    @Value("${app.autoCompleteKey}")
    private String autoCompleteKey;

    @Override
    public void run(String... args) throws Exception {
        if (!redisTemplate.hasKey(autoCompleteKey)) {
            RediSearchCommands<String, String> commands = searchConnection.sync();
            bookRepository.findAll().forEach(book -> {
                if (book.getAuthors() != null) {
                    book.getAuthors().forEach(author -> {
                        Suggestion<String> suggestion = Suggestion.builder(author).score(1d).build();
                        commands.sugadd(autoCompleteKey, suggestion);
                    });
                }
            });

            log.info(">>>> Created Author Name Suggestions...");
        }
    }
}

Let’s break down the logic of the CreateAuthorNameSuggestions CommandLineRunner:

  • First, we guarantee a single execution by checking for the existence of the key for the auto-complete dictionary.
  • Then, using the BookRepository we loop over all books
  • For each author in a book we add a suggestion to the dictionary

To use the auto-suggestion feature in the controller, we can add a new method:

@Value("${app.autoCompleteKey}")
private String autoCompleteKey;

@GetMapping("/authors")
public List<Suggestion<String>> authorAutoComplete(@RequestParam(name = "q") String query) {
    RediSearchCommands<String, String> commands = searchConnection.sync();
    SuggetOptions options = SuggetOptions.builder().max(20L).build();
    return commands.sugget(autoCompleteKey, query, options);
}

With imports:

import com.redislabs.lettusearch.Suggestion;
import com.redislabs.lettusearch.SuggetOptions;

In the authorAutoComplete method, we use the FT.SUGGET command (via the sugget method from the RediSearchCommands object) and build a query using a SuggetOptions configuration. In the example above, we set the maximum number of results to 20. We can use curl to craft a request to our new endpoint. In this example, I’m passing “brian s” as the query:

curl --location --request GET 'http://localhost:8080/api/books/authors/?q=brian%20s'

This results in a response with 2 JSON objects:

[
{
"string": "Brian Steele",
"score": null,
"payload": null
},
{
"string": "Brian Sam-Bodden",
"score": null,
"payload": null
}
]

If we add one more letter to our query to make it “brian sa”:

curl --location --request GET 'http://localhost:8080/api/books/authors/?q=brian%20sa'

We get the expected narrowing of the suggestion set:

[
{
"string": "Brian Sam-Bodden",
"score": null,
"payload": null
}
]
Caching REST Services with Redis

In the case of a cache hit, it will return the cached value. Otherwise, in the case of a miss, it will store the search method’s return value in the cache, allowing the method to execute as if there were no cache at all. If we try the request http://localhost:8080/api/books/search?q=java:

curl --location --request GET 'http://localhost:8080/api/books/search?q=java'

On the first request we get a 28 ms response time:

PostMan Request 2

Subsequent responses return in the range of 8 ms to 10 ms consistently:

PostMan Request 1

How to Implement Fixed Window Rate Limiting using Redis

This basic recipe, using Redis Strings, a minute-size window, and a quota of 20 requests, is outlined on the Redis Blog. I'll summarize it here before we jump into our Spring Reactive implementation:

  1. GET [user-api-key]:[current minute number] such as GET "u123:45"
  2. If the result from step 1 is less than 20 (or the key is not found), go to step 4; otherwise continue to step 3.
  3. Reject the request.
  4. In an atomic way (using MULTI and EXEC), increment the key and set the expiry to 59 seconds into the future.
MULTI
INCR [user-api-key]:[current minute number]
EXPIRE [user-api-key]:[current minute number] 59
EXEC
  5. Otherwise, fulfill the request.

Ok, now that we know the basic recipe, let's implement it in Spring.
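
Before diving into the Spring version, here's the recipe above as a minimal standalone sketch in plain Java with Jedis (the class and method names are invented for illustration; the tutorial's own implementation uses Spring Reactive rather than Jedis):

import java.time.LocalTime;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.Transaction;

public class FixedWindowRateLimiter {

    private static final int QUOTA = 20;

    // Returns true if the request should be fulfilled, false if it should be rejected.
    public boolean isAllowed(Jedis jedis, String apiKey) {
        // Step 1: build the key from the API key and the current minute number
        String key = String.format("%s:%d", apiKey, LocalTime.now().getMinute());
        String count = jedis.get(key);

        // Steps 2 and 3: reject if the counter has already reached the quota
        if (count != null && Integer.parseInt(count) >= QUOTA) {
            return false;
        }

        // Step 4: atomically increment the counter and set its expiry with MULTI/EXEC
        Transaction tx = jedis.multi();
        tx.incr(key);
        tx.expire(key, 59);
        tx.exec();

        // Step 5: fulfill the request
        return true;
    }
}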

Atomicity with Gears

We invoke the RedisGears function with three arguments: the key, the quota, and the TTL seconds in the future.

As we've done previously, if the function returns false we let the request through; otherwise we return an HTTP 429:

@Override
public Mono<ServerResponse> filter(ServerRequest request, HandlerFunction<ServerResponse> next) {
    int currentMinute = LocalTime.now().getMinute();
    String key = String.format("rl_%s:%s", requestAddress(request.remoteAddress()), currentMinute);

    RedisGearsCommands<String, String> gears = connection.sync();

    List<Object> results = gears.trigger("RateLimiter", key, Long.toString(maxRequestPerMinute), "59");
    if (!results.isEmpty() && !Boolean.parseBoolean((String) results.get(0))) {
        return next.handle(request);
    } else {
        return ServerResponse.status(TOO_MANY_REQUESTS).build();
    }
}

Testing with curl

Once again, we use a curl loop to test the limiter:

for n in {1..22}; do echo $(curl -s -w " :: HTTP %{http_code}, %{size_download} bytes, %{time_total} s" -X GET http://localhost:8080/api/ping); sleep 0.5; done

You should see the 21st request being rejected:

for n in {1..22}; do echo $(curl -s -w " :: HTTP %{http_code}, %{size_download} bytes, %{time_total} s" -X GET http://localhost:8080/api/ping); sleep 0.5; done
PONG :: HTTP 200, 4 bytes, 0.064786 s
PONG :: HTTP 200, 4 bytes, 0.009926 s
PONG :: HTTP 200, 4 bytes, 0.009546 s
PONG :: HTTP 200, 4 bytes, 0.010189 s
PONG :: HTTP 200, 4 bytes, 0.009399 s
PONG :: HTTP 200, 4 bytes, 0.009210 s
PONG :: HTTP 200, 4 bytes, 0.008333 s
PONG :: HTTP 200, 4 bytes, 0.008009 s
PONG :: HTTP 200, 4 bytes, 0.008919 s
PONG :: HTTP 200, 4 bytes, 0.009271 s
PONG :: HTTP 200, 4 bytes, 0.007515 s
PONG :: HTTP 200, 4 bytes, 0.007057 s
PONG :: HTTP 200, 4 bytes, 0.008373 s
PONG :: HTTP 200, 4 bytes, 0.007573 s
PONG :: HTTP 200, 4 bytes, 0.008209 s
PONG :: HTTP 200, 4 bytes, 0.009080 s
PONG :: HTTP 200, 4 bytes, 0.007595 s
PONG :: HTTP 200, 4 bytes, 0.007955 s
PONG :: HTTP 200, 4 bytes, 0.007693 s
PONG :: HTTP 200, 4 bytes, 0.008743 s
:: HTTP 429, 0 bytes, 0.007226 s
:: HTTP 429, 0 bytes, 0.007388 s

If we run Redis in monitor mode, we should see the calls to RG.TRIGGER, and under each one the calls to GET, INCR, and EXPIRE for allowed requests:

1631249244.006212 [0 172.17.0.1:56036] "RG.TRIGGER" "RateLimiter" "rl_localhost:47" "20" "59"
1631249244.006995 [0 ?:0] "GET" "rl_localhost:47"
1631249244.007182 [0 ?:0] "INCR" "rl_localhost:47"
1631249244.007269 [0 ?:0] "EXPIRE" "rl_localhost:47" "59"

And for a rate-limited request, you should see only the call to GET:

1631249244.538478 [0 172.17.0.1:56036] "RG.TRIGGER" "RateLimiter" "rl_localhost:47" "20" "59"
1631249244.538809 [0 ?:0] "GET" "rl_localhost:47"

The complete code for this implementation is under the branch with_gears.

Atomicity with Lua

We add a field annotated with @Value to hold the request quota:

@Value("${MAX_REQUESTS_PER_MINUTE}")
Long maxRequestPerMinute;

In our application.properties we'll set it to a max of 20 requests per minute:

MAX_REQUESTS_PER_MINUTE=20

To invoke the filter we use the newly modified constructor, passing the template, the script, and the maxRequestPerMinute value:

@Bean
RouterFunction<ServerResponse> routes() {
    return route() //
        .GET("/api/ping", r -> ok() //
            .contentType(TEXT_PLAIN) //
            .body(BodyInserters.fromValue("PONG")) //
        ).filter(new RateLimiterHandlerFilterFunction(redisTemplate, script(), maxRequestPerMinute)).build();
}

Testing with curl

Using our trusty curl loop:

for n in {1..22}; do echo $(curl -s -w " :: HTTP %{http_code}, %{size_download} bytes, %{time_total} s" -X GET http://localhost:8080/api/ping); sleep 0.5; done

You should see the 21st request being rejected:

for n in {1..22}; do echo $(curl -s -w " :: HTTP %{http_code}, %{size_download} bytes, %{time_total} s" -X GET http://localhost:8080/api/ping); sleep 0.5; done
PONG :: HTTP 200, 4 bytes, 0.173759 s
PONG :: HTTP 200, 4 bytes, 0.008903 s
PONG :: HTTP 200, 4 bytes, 0.008796 s
PONG :: HTTP 200, 4 bytes, 0.009625 s
PONG :: HTTP 200, 4 bytes, 0.007604 s
PONG :: HTTP 200, 4 bytes, 0.008052 s
PONG :: HTTP 200, 4 bytes, 0.011364 s
PONG :: HTTP 200, 4 bytes, 0.012158 s
PONG :: HTTP 200, 4 bytes, 0.010415 s
PONG :: HTTP 200, 4 bytes, 0.010373 s
PONG :: HTTP 200, 4 bytes, 0.010009 s
PONG :: HTTP 200, 4 bytes, 0.006587 s
PONG :: HTTP 200, 4 bytes, 0.006807 s
PONG :: HTTP 200, 4 bytes, 0.006970 s
PONG :: HTTP 200, 4 bytes, 0.007948 s
PONG :: HTTP 200, 4 bytes, 0.007949 s
PONG :: HTTP 200, 4 bytes, 0.006606 s
PONG :: HTTP 200, 4 bytes, 0.006336 s
PONG :: HTTP 200, 4 bytes, 0.007855 s
PONG :: HTTP 200, 4 bytes, 0.006515 s
:: HTTP 429, 0 bytes, 0.006633 s
:: HTTP 429, 0 bytes, 0.008264 s

If we run Redis in monitor mode, we should see the calls to EVALSHA invoking our Lua script, followed by the call to GET for a rejected request, and the same plus calls to INCR and EXPIRE for an allowed request:

1630342834.878972 [0 172.17.0.1:65008] "EVALSHA" "16832548450a4b1c5e23ffab55bddefe972fecd2" "1" "rl_localhost:0" "20" "59"
1630342834.879044 [0 lua] "GET" "rl_localhost:0"
1630342834.879091 [0 lua] "INCR" "rl_localhost:0"
1630342834.879141 [0 lua] "EXPIRE" "rl_localhost:0" "59"
1630342835.401937 [0 172.17.0.1:65008] "EVALSHA" "16832548450a4b1c5e23ffab55bddefe972fecd2" "1" "rl_localhost:0" "20" "59"
1630342835.402009 [0 lua] "GET" "rl_localhost:0"

The complete code for this implementation is under the branch with_lua.

Reactive Implementation

The curl flags used are as follows: -s silences curl (hides the progress bar and errors), and -w sets the write-out options, in which we can pass a string with interpolated variables. Then we sleep half a second between cycles.

for n in {1..22}; do echo $(curl -s -w " :: HTTP %{http_code}, %{size_download} bytes, %{time_total} s" -X GET http://localhost:8080/api/ping); sleep 0.5; done
PONG :: HTTP 200, 4 bytes, 0.393156 s
PONG :: HTTP 200, 4 bytes, 0.019530 s
PONG :: HTTP 200, 4 bytes, 0.023677 s
PONG :: HTTP 200, 4 bytes, 0.019922 s
PONG :: HTTP 200, 4 bytes, 0.025573 s
PONG :: HTTP 200, 4 bytes, 0.018916 s
PONG :: HTTP 200, 4 bytes, 0.019548 s
PONG :: HTTP 200, 4 bytes, 0.018335 s
PONG :: HTTP 200, 4 bytes, 0.010105 s
PONG :: HTTP 200, 4 bytes, 0.008416 s
PONG :: HTTP 200, 4 bytes, 0.009829 s
PONG :: HTTP 200, 4 bytes, 0.011766 s
PONG :: HTTP 200, 4 bytes, 0.010809 s
PONG :: HTTP 200, 4 bytes, 0.015483 s
PONG :: HTTP 200, 4 bytes, 0.009732 s
PONG :: HTTP 200, 4 bytes, 0.009970 s
PONG :: HTTP 200, 4 bytes, 0.008696 s
PONG :: HTTP 200, 4 bytes, 0.009176 s
PONG :: HTTP 200, 4 bytes, 0.009678 s
PONG :: HTTP 200, 4 bytes, 0.012497 s
:: HTTP 429, 0 bytes, 0.010071 s
:: HTTP 429, 0 bytes, 0.006625 s

If we run Redis in monitor mode, we should see the call to GET for a rejected request, and the same plus calls to INCR and EXPIRE for an allowed request:

1630366639.188290 [0 172.17.0.1:65016] "GET" "rl_localhost:37"
1630366639.200956 [0 172.17.0.1:65016] "INCR" "rl_localhost:37"
1630366639.202372 [0 172.17.0.1:65016] "EXPIRE" "rl_localhost:37" "59"
...
1630366649.891110 [0 172.17.0.1:65016] "GET" "rl_localhost:37"
1630366650.417131 [0 172.17.0.1:65016] "GET" "rl_localhost:37"

You can find this example on the main branch at https://github.com/redis-developer/fixed-window-rate-limiter


How to implement Rate Limiting in Spring Applications using Redis


Author:
Brian Sam-Bodden, Developer Advocate at Redis

In this series of mini-tutorials we'll explore several approaches to implement rate limiting in Spring applications using Redis. We’ll start with the most basic of Redis recipes and we’ll slowly increase the complexity of our implementations.


What is Rate Limiting?

Rate Limiting entails techniques to regulate the number of requests a particular client can make against a networked service. It caps the total number and/or the frequency of requests.

Why do we need Rate Limiting?

There are many reasons why you would want to add a rate limiter to your APIs. Whether the abuse is intentional or accidental, a rate limiter can stop the invaders at the gate. Let’s think of some scenarios where a rate limiter could save your bacon:

  • If you ever worked at an API-based startup, you know that to get anywhere you need a “free” tier. A free tier will get potential customers to try your service and spread the word. But without limiting the free tier users you could risk losing the few paid customers your startup has.
  • Programmatic integrations with your API could have bugs. Sometimes resource starvation is not caused by a malicious attack. These FFDoS (Friendly-Fire Denial of Service) attacks happen more often than you can imagine.
  • Finally, there are malicious players recruiting bots on a daily basis to make API providers’ lives miserable. Being able to detect and curtail those attacks before they impact your users could mean the life of your business.

Rate limiting is typically implemented on the server-side but if you have control of the clients you can also preempt certain types of access at that point. It relies on three particular pieces of information:

  1. Who’s making the request: Identifying the source of the attack or abuse is the most important part of the equation. If the offending requests cannot be grouped and associated with a single entity you’ll be fighting in the dark.
  2. What’s the cost of the request: Not all requests are created equal. For example, a request that’s bound to a single account’s data can likely only cause localized havoc, while a request that spans multiple accounts and/or broad spans of time (like multiple years) is much more expensive.
  3. What is their allotted quota: How many total requests and/or what rate of requests is permitted for the user. For example, in the case of the "free tier" you might have a smaller allotment/bucket of requests they can make, or you might reduce them during certain peak times.

Why Redis for Rate Limiting?

Redis is especially positioned as a platform to implement rate limiting for several reasons:

  • Speed: The checks and calculations required by a rate limiting implementation will add to the total request-response times of your API; you want those operations to happen as fast as possible.
  • Centralization and distribution: Redis can seamlessly scale your single server/instance setup to hundreds of nodes without sacrificing performance or reliability.
  • The Right Abstractions: Redis provides optimized data structures to support several of the most common rate limiter implementations, and with its built-in TTL (time-to-live) controls it allows for efficient management of memory. Counting things is a built-in feature in Redis and one of the many areas where Redis shines above the competition.

Now, let’s get started with our first implementation: the simple “Fixed Window” implementation.

Further Reading


Rate Limiting with Spring and Redis

The following links provide you with the available options to develop your application using Spring and Redis

Rate Limiting with Spring and Redis
Cap the maximum number of requests in a fixed window of time
Improving atomicity and performance with Lua
Improving atomicity and performance with Triggers and Functions
Enhanced Mapping of Java Objects to Hashes

We can add a query declaration to the repository interface like:

List<User> findByFirstNameAndLastName(String firstName, String lastName);

In this case, the method name findByFirstNameAndLastName is parsed, and the And keyword is used to determine that the method expects two parameters: firstName and lastName.

To test it we could add the following to our controller:

@GetMapping("/q")
public List<User> findByName(@RequestParam String firstName, @RequestParam String lastName) {
    return userRepository.findByFirstNameAndLastName(firstName, lastName);
}

Using curl to test it:

curl --location --request GET 'http://localhost:8080/api/users/q?firstName=Brad&lastName=Wilk'
[{"id":"01FNTE5KWCZ5H438JGB4AZWE85","firstName":"Brad","middleName":null,"lastName":"Wilk","email":"brad@ratm.com"}]

Formatting the resulting JSON we can see the record for Brad Wilk is returned as the only element of the JSON Array result:

[
{
"id": "01FNTE5KWCZ5H438JGB4AZWE85",
"firstName": "Brad",
"middleName": null,
"lastName": "Wilk",
"email": "brad@ratm.com"
}
]

Back on the Redis CLI monitor we can see the query generated by our repository method:

1638343589.454213 [0 172.19.0.1:63406] "FT.SEARCH" "UserIdx" "@firstName:{Brad} @lastName:{Wilk} "

Redis OM Spring extends Spring Data Redis with search capabilities that rival the flexibility of JPA queries by using Redis' native Search and Query engine.

Mapping Java Objects to JSON

Derived queries support logic and numerical operators like between, startingWith, greaterThan, lessThanOrEquals, and many more.

Below are some more examples of what's possible:

// find by numeric property
Iterable<Company> findByNumberOfEmployees(int noe);

// find by numeric property range
Iterable<Company> findByNumberOfEmployeesBetween(int noeGT, int noeLT);

// starting with/ending with
Iterable<Company> findByNameStartingWith(String prefix);

What's Next

This was but a brief tour of the capabilities of Redis OM Spring (ROMS). In the next installment we'll cover how ROMS extends Spring Data Redis' Redis Hash mapping to make it even better.


Redis OM - Spring


Author:
Brian Sam-Bodden, Developer Advocate at Redis

Introduction

The aim of the Redis OM family of projects is to provide high-level abstractions idiomatically implemented for your language/platform of choice. We currently cater to the Node.js, Python, .NET and Spring communities.

The Spring Framework is the leading full-stack Java/JEE application framework, and the goal of Redis OM Spring (ROMS) is to enable developers to easily add the power of Redis to their Spring Boot applications.

Redis OM Spring provides powerful repository and custom object-mapping abstractions built on top of the amazing Spring Data Redis (SDR) framework.

The current preview release provides all of SDR's capabilities plus the following (a short sketch follows the list):

  • A @Document annotation to map Spring Data models to Redis JSON documents
  • Enhancements to SDR's @RedisHash via @EnableRedisEnhancedRepositories to:
    • use Redis' native search engine (Redis Search) for secondary indexing
    • use ULID identifiers for @Id annotated fields
  • RedisDocumentRepository with automatic implementation of Repository interfaces for complex querying capabilities using @EnableRedisDocumentRepositories
  • Declarative Search Indices via @Indexable
  • Full-text Search Indices via @Searchable
  • @Bloom annotation to determine very quickly, and with a high degree of certainty, whether a value is in a collection
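
Putting a few of these annotations together, here's a minimal sketch of a JSON-mapped model and its repository (the Company model and its fields are invented for illustration; package names reflect the Redis OM Spring preview):

import org.springframework.data.annotation.Id;

import com.redis.om.spring.annotations.Document;
import com.redis.om.spring.annotations.Indexed;
import com.redis.om.spring.annotations.Searchable;
import com.redis.om.spring.repository.RedisDocumentRepository;

// Stored as a Redis JSON document; indexed and searchable fields are declared inline.
@Document
public class Company {
    @Id
    private String id;

    @Searchable
    private String name;

    @Indexed
    private Integer numberOfEmployees;

    // getters and setters omitted for brevity
}

// The repository interface gets its implementation, including derived queries,
// generated automatically.
interface CompanyRepository extends RedisDocumentRepository<Company, String> {
    Iterable<Company> findByName(String name);
}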

Tutorials to get you started

Mapping Java Objects to JSON
Enhanced Mapping of Java Objects to Hashes
Getting Started with Node and Redis

This application calls the GitHub API and caches the results into Redis.

Redis Rate-Limiting This is a very simple app that demonstrates rate limiting using Redis.

Notifications with WebSocket, Vue & Redis This project allows you to push notifications in a Vue application from a Redis PUBLISH using WebSockets.

Technical Articles & Videos

Redis Rapid Tips: ioredis (YouTube)

Mapping Objects between Node and Redis (YouTube)


Redis University

Redis for JavaScript Developers

Build full-fledged Redis applications with Node.js and Express.


NodeJS and Redis


Author:
Ajeet Raina, Former Developer Growth Manager at Redis

The following links provide you with the available options to develop your application using NodeJS and Redis

Node.js and Redis
Node.js Crash Course
Build a simple service with Redis OM for Node.js and Express
The Node.js Crash Course

The course uses the ioredis client and Redis Stack.

In this course, you'll learn about using Redis with Node.js through a blend of video and text-based training. You can also get hands-on with some optional workshop exercises where you'll add new functionality to an existing Node.js application.

Welcome to the Node.js Redis Crash Course!
Introducing Redis - the database that developers love!
The ioredis client for Node.js.
A visual tool for managing Redis.
Introducing the sample application.
Install, configure and run the sample application.
Modeling domain objects with Redis Hashes.
Extending the capabilities of Redis.
Storing JSON documents in Redis.
Indexing and querying Hashes with Redis as a Search and Query engine.
Using Redis Streams to handle a data fire hose.
How Redis helps your Node.js application to scale.
Using Redis as a cache to speed up user experience.
Horizontal scaling with Redis as a session store.
Consumer groups - collaborative stream processing.
A look at probabilistic data structures in Redis.
Wrap up and next steps.

Advanced Streams: Parallel Processing Checkins with Consumer Groups


Author:
Simon Prickett, Principal Developer Advocate at Redis

As our application grows in popularity and our user base increases, we're receiving more and more checkins. Recall that checkins are added to a Redis Stream by the Checkin Receiver, and read from that stream by the Checkin Processor. The Stream acts as a buffer between these two components:

Stream Overview

Unfortunately, our single Checkin Processor is struggling to keep up with the volume of new checkins. This means that we're seeing longer and longer lag times between a checkin arriving in the Stream and its values being reflected in our user and location Hashes.

And we can't simply run more than one instance of the Checkin Processor, as each instance would consume the entire Stream. What we need is a way for multiple instances of the same consumer code to collaboratively process entries from a Stream.

Redis Streams offers consumer groups as a solution for this. We can think of a consumer group as a single logical consumer that reads the entire Stream, spreading the work out between individual consumers in the group:

Stream Overview

Redis tracks which messages have been delivered to which consumers in the group, ensuring that each consumer receives its own unique subset of the Stream to process. This allows for parallel processing of the Stream by multiple consumer processes. As you'll see in the video, this requires us to rethink our processing logic to allow Stream entries to be processed out of order, and to avoid race conditions when updating user and location Hashes. We'll use the Lua interpreter built into Redis to help here.
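Here's a minimal sketch of how a consumer in such a group might be written with ioredis (this is illustrative, not the course's actual checkingroupprocessor.js; the stream key and group name match those used later in this module):

const Redis = require('ioredis');

const redisClient = new Redis();
const consumerName = process.argv[2] || 'consumer1';

const processCheckins = async () => {
  // Create the consumer group at the end of the stream; ignore the
  // BUSYGROUP error if the group already exists.
  try {
    await redisClient.xgroup(
      'CREATE', 'ncc:checkins', 'checkinConsumers', '$', 'MKSTREAM',
    );
  } catch (e) {
    // Group already exists.
  }

  for (;;) {
    // '>' requests entries never delivered to any consumer in this group.
    const response = await redisClient.xreadgroup(
      'GROUP', 'checkinConsumers', consumerName,
      'COUNT', 1, 'BLOCK', 5000,
      'STREAMS', 'ncc:checkins', '>',
    );

    if (response) {
      const [id, fields] = response[0][1][0];
      // ...process the checkin's fields here...
      // Acknowledge it so Redis no longer tracks it as pending for us.
      await redisClient.xack('ncc:checkins', 'checkinConsumers', id);
    }
  }
};

processCheckins();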

Hands-on Exercise

In this exercise, you'll run multiple concurrent instances of the Checkin Group Processor so that you can see how they work together to collaboratively process the Stream.

If you're still running the Checkin Processor service, stop it with Ctrl-C.

Next, open up two terminal windows. cd to the node-js-crash-course folder that you cloned the GitHub repo into in both windows.

In one terminal, start an instance of the Checkin Group Processor that we'll call consumer1:

$ npm run checkingroupprocessor consumer1

> js-crash-course@0.0.1 checkingroupprocessor
> node ./src/checkingroupprocessor.js "consumer1"

info: consumer1: Starting up.
info: consumer1: Processing checkin 1609602085397-0.
debug: consumer1: Processing 1609602085397-0.
debug: consumer1: Updating user ncc:users:789 and location ncc:locations:171.
info: consumer1: Acknowledged processing of checkin 1609602085397-0.
info: consumer1: Pausing to simulate work.
info: consumer1: Processing checkin 1609604227545-0.
debug: consumer1: Processing 1609604227545-0.
debug: consumer1: Updating user ncc:users:752 and location ncc:locations:100.
info: consumer1: Acknowledged processing of checkin 1609604227545-0.
info: consumer1: Pausing to simulate work.
info: consumer1: Processing checkin 1609605397408-0.
debug: consumer1: Processing 1609605397408-0.
debug: consumer1: Updating user ncc:users:180 and location ncc:locations:13.
info: consumer1: Acknowledged processing of checkin 1609605397408-0.
info: consumer1: Pausing to simulate work.
info: consumer1: Processing checkin 1609605876514-0.
...

In the second terminal, start another instance of the Checkin Group Processor, consumer2:

$ npm run checkingroupprocessor consumer2

> js-crash-course@0.0.1 checkingroupprocessor
> node ./src/checkingroupprocessor.js "consumer2"

info: consumer2: Starting up.
info: consumer2: Processing checkin 1609603711960-0.
debug: consumer2: Processing 1609603711960-0.
debug: consumer2: Updating user ncc:users:455 and location ncc:locations:181.
info: consumer2: Acknowledged processing of checkin 1609603711960-0.
info: consumer2: Pausing to simulate work.
info: consumer2: Processing checkin 1609604778689-0.
debug: consumer2: Processing 1609604778689-0.
debug: consumer2: Updating user ncc:users:102 and location ncc:locations:144.
info: consumer2: Acknowledged processing of checkin 1609604778689-0.
info: consumer2: Pausing to simulate work.
...

Look at the checkin IDs that each consumer processes. Note that they don't receive the same checkins. The Redis server gives each consumer in a group its own logical view of the Stream, each processing a subset of entries. This speeds up checkin processing as now we can have more than one consumer running at the same time.

Let’s take a look at some of the information Redis is tracking about our consumer group. Go ahead and stop both consumer processes by pressing Ctrl-C.

If you're using RedisInsight, open up the "Streams" browser, click the ncc:checkins key, and then select the "Consumer Groups" tab. You should see something like this:

RedisInsight Consumer Groups

This shows the number of consumers that are in the group, how many pending messages each has (a pending message is one that has been read by a consumer but not yet acknowledged with XACK), and the consumer's idle time since it last read from the Stream.

Click on "checkinConsumers" in the Consumer Group table to see a breakdown of pending messages and idle time for each consumer:

RedisInsight Consumer Groups Detail

In a real-world system, you could use this information to detect consumers that have encountered a problem processing entries. Redis Streams provides commands to reassign messages that a consumer has read but not acknowledged, allowing you to build consumer recovery strategies that re-allocate those messages to healthy consumer instances in the same group.
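As a rough sketch (assuming ioredis, code running inside an async function, and Redis 6.2 or newer for XPENDING's IDLE filter), that reassignment might look like this:

// Find up to 10 entries that have been pending for over a minute...
const stuck = await redisClient.xpending(
  'ncc:checkins', 'checkinConsumers', 'IDLE', 60000, '-', '+', 10,
);

// ...and claim each one for a healthy consumer, which can then
// process and XACK it as normal.
for (const [id] of stuck) {
  await redisClient.xclaim(
    'ncc:checkins', 'checkinConsumers', 'consumer2', 60000, id,
  );
}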

If you're using redis-cli rather than RedisInsight, you can see the same information using the XINFO and XPENDING commands:

127.0.0.1:6379> xinfo groups ncc:checkins
1) 1) "name"
2) "checkinConsumers"
3) "consumers"
4) (integer) 2
5) "pending"
6) (integer) 0
7) "last-delivered-id"
8) "1609605876514-0"
127.0.0.1:6379> xpending ncc:checkins checkinConsumers
1) (integer) 0
127.0.0.1:6379> xinfo consumers ncc:checkins checkinConsumers
1) 1) "name"
2) "consumer1"
3) "pending"
4) (integer) 0
5) "idle"
6) (integer) 2262454
2) 1) "name"
2) "consumer2"
3) "pending"
4) (integer) 0
5) "idle"
6) (integer) 2266244

External Resources


Caching with Redis and Express Middleware


Author: Simon Prickett, Principal Developer Advocate at Redis

We want to provide our users with up to date weather for each of our locations… so we've partnered with a company that provides a weather API.

Our use of this API is metered and rate limited, so ideally we don't want to keep making the same requests to it over and over again. This is wasteful, could cost us money, and will slow down responses to our users.

Redis can be used as a cache to help here. Keys in Redis can be given an expiry time, after which Redis will delete them. We'll use this capability to cache the results of weather API calls as Redis Strings, keeping them for an hour to achieve a balance between users seeing the absolute latest weather report for a location and the load we're placing on the API provider's servers.

We'll use an extra Express middleware function to check if we've got the weather for a location in the Redis cache, and only go get it from the API provider if necessary.
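Here's a minimal sketch of what such a middleware function could look like (the route handler name is a placeholder, and the application's actual code may differ):

const cacheWeather = async (req, res, next) => {
  const cacheKey = `ncc:weather:${req.params.locationId}`;
  const cachedWeather = await redisClient.get(cacheKey);

  if (cachedWeather) {
    // Cache hit: return the stored JSON without calling the weather API.
    return res.status(200).json(JSON.parse(cachedWeather));
  }

  // Cache miss: fall through to the route handler, which calls the API and
  // caches the response with a one hour (3600 second) expiry, e.g.:
  //   await redisClient.set(cacheKey, JSON.stringify(weather), 'EX', 3600);
  return next();
};

router.get('/location/:locationId/weather', cacheWeather, getWeatherHandler);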

Hands-on Exercise

In this exercise, you'll use Postman to see how caching with Redis makes an API call faster while also saving us from incurring costs associated with using a third-party service.

You'll be using the "/location/:locationId/weather" route for this exercise. The code for this route takes a location ID, retrieves that location's latitude and longitude from its Redis Hash, then calls the OpenWeather API with those coordinates to retrieve weather data as a JSON document.

This document is returned to the user and cached in Redis for an hour. Subsequent requests for the same location's weather within the hour are handled by the middleware function that sits in front of this route's logic. It returns the cached value without making a request to the OpenWeather servers.

To use OpenWeather's API, you'll first need to sign up for a free API key on their website.

Once you have your API key, stop the API server component (Ctrl-C), and set an environment variable containing your key as follows:

$ export WEATHER_API_KEY=my_api_key

Then start the server:

$ npm run dev

Now start Postman, and click the + button to create a new request:

Postman Plus Button

Set the URL to http://localhost:8081/api/location/99/weather and make sure you have a GET request selected from the dropdown of available HTTP verbs:

Postman set URL and verb

Click "Send", and you should see the weather report JSON for location 99 appear in the "Response" panel. Make a note of the overall response time that Postman reports for this request (509 milliseconds here):

Origin Request

Take a look at the output from the API server in your terminal window, you should see that no value for location 99 was found in the cache, so the data was requested from OpenWeather and then added to the cache in Redis:

debug: Cache miss for location 99 weather.

Click "Send" in Postman again to repeat the request... This time the response will be served from Redis and will be noticeably faster. No call to the OpenWeather API was made. Note the difference in response times when the result comes from cache (just 6 milliseconds here!):

Cached Request

Checking the output from the API server's terminal window shows that this request was served from cache:

debug: Cache hit for location 99 weather.

Finally, take a look at the cached data in Redis. Use RedisInsight or redis-cli to take a look at the key ncc:weather:99. The TTL command shows the number of seconds remaining before Redis deletes this key from the cache:

$ redis-cli
127.0.0.1:6379> GET ncc:weather:99
127.0.0.1:6379> TTL ncc:weather:99

If you're using RedisInsight, you can see the remaining TTL (in seconds) in the browser view:

TTL in Redis Insight

The key ncc:weather:99 will be deleted an hour after it was originally written to Redis, causing the next request after that deletion for location 99's weather to be a cache miss. If you want to speed up this process, delete the key ncc:weather:99 using the trash can icon in RedisInsight, or the DEL command in redis-cli:

127.0.0.1:6379> DEL ncc:weather:99

Then try your request in Postman again and see what happens.

External Resources

If you'd like to learn more about caching API responses in a Node.js application with Redis, check out Justin's excellent video:


Processing Checkins with Redis Streams


Author: Simon Prickett, Principal Developer Advocate at Redis

The most common action that users perform with our system is checking in at a location. This part of the system needs to quickly capture checkins and scale independently of other components.

We decided to build a separate Express application just to receive checkin POST requests from users. This allows us to scale it separately from other API endpoints that deal with GET requests. To make our Checkin Receiver as fast as possible, we decided to do the actual work of processing checkins in a separate service. The Checkin Processor service reads checkins from the stream and updates the user and location Hashes in Redis.

Checkins are transient data in our system - as long as we process them all, we don't need to keep them around forever. It also makes sense to store them in the order that they arrive in the system.

Using a Redis Stream to store our checkin data is a natural fit for this use case. A Stream acts as a buffer between producer and consumer components. With Redis Streams, each entry in the stream is given a timestamp ID and the Stream is ordered by these IDs.

In our application, the Checkin Receiver Service is the producer and the Checkin Processor the consumer. We can represent this in a diagram like so:

Streams Overview

Using a Stream allows these components to operate at different speeds with no knowledge of each other. The Checkin Receiver simply adds a new entry to the Stream for each checkin it receives from a user, and the Checkin Processor reads the Stream and updates user and location Hashes at its own pace.

It's also possible to read a Redis Stream to find entries that were added in a specified time period, between start and end IDs. As our IDs are timestamps, this means that we can request data that was added in a given timeframe. We use this capability in the API Server component, and in this module's coding exercise you'll get to extend this with new functionality.
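For example, here's a sketch of such a time-range query with ioredis (the timestamps are illustrative; plain millisecond values are valid start and end IDs):

// All checkins added between these two points in time.
const entries = await redisClient.xrange(
  'ncc:checkins', 1609602000000, 1609606000000,
);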

Hands-on exercise

Take a moment to run the Checkin Processor component which reads checkins from the stream and updates user and location Hashes.

The sample data contains 5000 unprocessed checkins which the Checkin Processor will consume. The Checkin Processor keeps track of how far it has gotten in the stream by storing the ID of the last processed checkin in Redis. This way, when it's stopped and restarted it picks up from where it left off.
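In outline (simplified from the real checkinprocessor.js in the repo), that resume logic might look like this:

// Start from the stored ID, or 0 to read the stream from the beginning.
const lastIdKey = 'ncc:checkinprocessor:lastid';
let lastId = (await redisClient.get(lastIdKey)) || 0;

// Read the next entry after lastId, waiting up to 5 seconds for one.
const response = await redisClient.xread(
  'COUNT', 1, 'BLOCK', 5000, 'STREAMS', 'ncc:checkins', lastId,
);

if (response) {
  const [id, fields] = response[0][1][0];
  // ...update the user and location Hashes here...
  lastId = id;
  await redisClient.set(lastIdKey, lastId); // Remember progress for restarts.
}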

In a terminal window, cd to the node-js-crash-course folder that you cloned the GitHub repo to, and start the Checkin Processor:

$ npm run checkinprocessor delay

Adding the delay argument introduces an artificial random processing time for each checkin. This slows the Checkin Processor down so that you can examine its output more easily. You should see it start up and begin processing checkins from the start of the stream at ID 0, which is the lowest possible stream entry ID:

$ npm run checkinprocessor delay

> js-crash-course@0.0.1 checkinprocessor
> node ./src/checkinprocessor.js -- "delay"

info: Reading stream from last ID 0.
debug: Updating user ncc:users:789 and location ncc:locations:171.
info: Processed checkin 1609602085397-0.
debug: Updating user ncc:users:455 and location ncc:locations:181.
info: Processed checkin 1609603711960-0.
debug: Updating user ncc:users:752 and location ncc:locations:100.
info: Processed checkin 1609604227545-0.
debug: Updating user ncc:users:102 and location ncc:locations:144.
info: Processed checkin 1609604778689-0.
debug: Updating user ncc:users:180 and location ncc:locations:13.
info: Processed checkin 1609605397408-0.
...

Stop the Checkin Processor with Ctrl-C after it has processed a few checkins. Note the ID of the last checkin processed (this is 1609605397408-0 in the example above). Also note the user and location ID for the last checkin processed (user 180, location 13 in the example above).

Verify that the Checkin Processor stored this ID in Redis so that it knows where to start from when it's restarted. Using redis-cli or RedisInsight, take a look at the contents of the key ncc:checkinprocessor:lastid:

127.0.0.1:6379> get ncc:checkinprocessor:lastid
"1609605397408-0"

The value should match the last checkin ID that was processed.

Finally, let's verify that the Checkin Processor updated the user's Hash with details from that checkin. Use RedisInsight or the HGETALL command in redis-cli to look at the hash whose key is ncc:users:<user-id>, replacing <user-id> with the ID of the user that you noted earlier.

So for my example, let's look at user 180:

127.0.0.1:6379> hgetall ncc:users:180
1) "id"
2) "180"
3) "firstName"
4) "Sophia"
5) "lastName"
6) "Marshall"
7) "email"
8) "sophia.marshall@example.com"
9) "password"
10) "$2b$05$DPSHjaW44H4fn9sudfz/5.f1WcuZMrA0OZIp0CALQf0MH8zH1SSda"
11) "numCheckins"
12) "2332"
13) "lastCheckin"
14) "1609605397408"
15) "lastSeenAt"
16) "13"

Verify that the value for lastCheckin is the timestamp from the last processed checkin's ID (1609605397408 in my case), and that the lastSeenAt value is the location ID from the last processed checkin (13 in my case).

Coding exercise

In this exercise, you'll implement a new route in the API Server component. This route will return only the most recent checkin from the checkins stream. You'll use the XREVRANGE command for this.

First, make sure the API Server is running:

$ npm run dev

(remember that this starts the server with nodemon, so as you modify the code and save your changes it will automatically restart and run the new code).

Open the node-js-crash-course folder with your IDE, and open the file src/routes/checkin_routes.js. Locate the function that handles the /checkins/latest route.

Using the XREVRANGE documentation as a guide, modify the following line to invoke XREVRANGE so that it returns just the most recent checkin:

const latestCheckin = await redisClient.xrevrange(checkinStreamKey, 'TODO');

Remember: When using ioredis, each parameter to a Redis command needs to be passed as a separate value.
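For a feel of the command's shape, here's an illustrative call on a hypothetical stream key (not the exercise solution verbatim): '+' means the newest possible ID, '-' the oldest, and COUNT limits how many entries are returned:

const newest = await redisClient.xrevrange('mystream', '+', '-', 'COUNT', 1);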

Test your code by visiting http://localhost:8081/checkins/latest - you should see a JSON representation of a checkin.

To make sure your code returns the latest checkin, you need to POST a checkin using Postman. Start the Checkin Receiver component in a new terminal window:

$ npm run checkinreceiver

Then use Postman to POST a checkin. In Postman, open a new request, configure it as shown, and press Send:

Checkin Test with Postman

Now when you refresh http://localhost:8081/checkins/latest in your browser, the values shown should match those that you supplied in Postman.

External Resources

In this video, Justin introduces Redis Streams with an example application that's very similar to the one we're building in this course:

In our example application, we process stream entries in Node.js using the array representation that ioredis returns by default. In this video, I look at using advanced features of ioredis to make it return JavaScript objects instead:


Course Wrap Up


Author: Simon Prickett, Principal Developer Advocate at Redis

Thanks for taking the time to complete this course. We hope that you found it useful and that you've learned a few things about using Redis as a data store and as a cache in your Node.js applications.

Don't forget that the application source code is available on GitHub for you to browse and use in your own projects.

Stay in touch! We'd love to hear from you and see what you build with Redis. Why not join us on Discord and subscribe to our YouTube channel where we publish new videos regularly.

Finally, if you'd like to continue your learning journey, check out Redis University for free, self-paced online courses.


Managing Domain Objects with Redis Hashes


Author: Simon Prickett, Principal Developer Advocate at Redis

In this module, you'll see how we're using Redis Hashes to model the user and location data in our application.

Coding Exercise

In your first coding exercise, you'll be adding a new route that takes a user's ID and returns their full name.

Using your IDE, open the node-js-crash-course folder that you cloned the GitHub repository into. Open the file src/routes/user_routes.js and find the route /user/:userId/fullname which looks like this:

// EXERCISE: Get user's full name.
router.get(
  '/user/:userId/fullname',
  [param('userId').isInt({ min: 1 }), apiErrorReporter],
  async (req, res) => {
    const { userId } = req.params;
    /* eslint-disable no-unused-vars */
    const userKey = redis.getKeyName('users', userId);
    /* eslint-enable */

    // TODO: Get the firstName and lastName fields from the
    // user hash whose key is in userKey.
    // HINT: Check out the HMGET command...
    // https://redis.io/commands/hmget
    const [firstName, lastName] = ['TODO', 'TODO'];

    res.status(200).json({ fullName: `${firstName} ${lastName}` });
  },
);

In this exercise, you'll modify the code to return the user's full name by retrieving the firstName and lastName fields for the requested user from Redis.

First, make sure your server is still running, if not start it with:

$ npm run dev

Next, browse to http://localhost:8081/api/user/5/fullname

You should see:

{
  "fullName": "TODO TODO"
}

Take a look at the documentation for the Redis HMGET command, which retrieves multiple named fields from a Redis Hash. You'll need to add code that calls the Redis client's hmget function, then place the values returned into the firstName and lastName variables. You should be able to retrieve both values using a single call to hmget. For guidance on how to invoke Redis commands, check out the code for the /user/:userId route which calls the HGETALL command.
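As a generic illustration (key name assumed), hmget returns the requested field values as an array, in the order you asked for them:

// e.g. ['Isabella', 'Pedersen'] if the Hash holds those field values.
const [firstName, lastName] = await redisClient.hmget(
  'ncc:users:99', 'firstName', 'lastName',
);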

nodemon will restart the server automatically for you each time you save your changes.

When you're ready to test your solution, browse to http://localhost:8081/api/user/5/fullname and you should see:

{
  "fullName": "Alejandro Reyes"
}

If you need help from our team, join us in Discord.

External Resources

In this video, Justin explains what Redis Hashes are and shows how common Redis Hash commands work:

You can find documentation for all of the Redis Hash commands on redis.io.


Introducing RedisInsight


Author: Simon Prickett, Principal Developer Advocate at Redis

RedisInsight is a free product from Redis that provides an intuitive graphical user interface for managing Redis databases. RedisInsight allows you to browse Redis and monitor changes in data in real time. You can edit data stored in existing keys, create and delete new ones, and run redis-cli commands.

RedisInsight also supports some popular Redis features, and we'll use it in this course to look at data managed by Redis JSON and Search. The data type specific views in RedisInsight make visualizing even the most complex Redis data types easy. We'll benefit from this when we look at Redis Streams later in the course.

For this course, we'll be running Redis in a Docker container. While you can complete the course using just the redis-cli interface provided with the container, we'd strongly recommend that you download and install RedisInsight to benefit from its graphical interface and specialized views for Redis data types and modules.

We'll cover how to connect RedisInsight to our Redis server in the "Up and Running with the Sample Application" module shortly.

External Resources


Redis Extensibility


Author: Simon Prickett, Principal Developer Advocate at Redis

Redis Modules

How to extend Redis?

Redis has a Modules API that allows developers to extend its core functionality with new capabilities. Redis Modules are libraries that can be written in C or other languages (including Rust and Zig). Modules can add new commands and/or data structures to Redis. For example, through the addition of modules, Redis can be extended to become a Timeseries or Graph database, while retaining all of its original key/value store functionality.

Modules are loaded into the Redis server at startup by modifying the redis.conf configuration file. Application developers then make use of the extra functionality provided by a module simply by calling the module's commands in the same way they would any other Redis command. We'll see how to do this using the ioredis client for Node.js shortly.
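For example, ioredis doesn't need to know a module's commands ahead of time: its generic call function sends any command name and arguments straight to the server (the key here is illustrative, and the '$' path syntax assumes the v2 JSON API):

// Store a JSON document with a module-provided command...
await redisClient.call('JSON.SET', 'mykey', '$', JSON.stringify({ hello: 'world' }));

// ...then read part of it back. Returns the JSON string '["world"]'.
const hello = await redisClient.call('JSON.GET', 'mykey', '$.hello');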

Where to find modules?

The redis.io website has a catalog of available modules. Redis has developed a number of these that extend Redis in different ways, and we'll use some of them in our sample application.

One way to get started with Redis modules is to use the Redis Stack Docker container from Docker Hub. This is the container that you're using on this course, and it includes all of the following capabilities:

  • Search and Query - a full-featured search and query engine.
  • Time Series - a timeseries database.
  • JSON - adds a native JSON data type to Redis.
  • Probabilistic - adds native Bloom and Cuckoo filter data types to Redis, plus other probabilistic data structures.

Redis offers Redis Enterprise Cloud, a fully managed service for running and scaling Redis and Redis Stack. Sign up for Redis Enterprise Cloud and use the full-featured free tier to try it out!

Using Redis Stack in our Application

Our social checkin application uses three Redis Stack capabilities:

  • We'll use Search and Query to index our user and location Hashes, giving us the ability to perform queries such as:
    • "Which user is associated with the email address sara.olsen@example.com?"
    • "Find me the users with the most recent checkins".
    • "Find all restaurants within a 3 mile radius of my location that have at least a 3 star rating"
  • JSON adds commands to store and manipulate JSON documents. We'll use those to retrieve extra detail about each of our locations.
  • And finally, we'll take advantage of a space efficient Probabilistic Bloom filter to stop users from posting duplicate checkins.

In the next section, we'll get to grips with Redis JSON...

External Resources

Check out these links to learn more about which modules are available for Redis and how to use the Modules API to create your own:


Managing Success with Redis


Author: Simon Prickett, Principal Developer Advocate at Redis

We launched our checkin application, and it's been an incredible success! Users love it and are checking in to all of the locations that signed up to be part of it!

In the next few modules, we'll look at how Redis can help us manage growth as we gain more users, process more checkins, and increase our usage of external services.

We'll look at:

  • Using Redis as a cache in an Express application.
  • Scaling horizontally by using Redis as a session store for Express.
  • Scaling checkin processing with Redis Streams consumer groups.
  • Preventing duplicate checkins using probabilistic data structures.

Using Redis from Node.js


Author: Simon Prickett, Principal Developer Advocate at Redis

To connect to Redis from an application, we need a Redis client library for the language that we're coding in. Redis clients perform the following functions:

  • Manage the connections between our application and the Redis server.
  • Handle network communications to the Redis server using Redis' wire protocol.
  • Provide a language specific API that we use in our application.

For Node.js, there are two popular Redis clients: ioredis and node_redis. Both clients expose similar programming APIs, wrapping each Redis command as a function that we can call in a Node.js script. For this course, we'll use ioredis which has built in support for modern JavaScript features such as Promises.

Here's a complete Node.js script that uses ioredis to perform the SET and GET commands that we previously tried in redis-cli:

const Redis = require('ioredis');

const redisDemo = async () => {
  // Connect to Redis at 127.0.0.1, port 6379.
  const redisClient = new Redis({
    host: '127.0.0.1',
    port: 6379,
  });

  // Set key "myname" to have value "Simon Prickett".
  await redisClient.set('myname', 'Simon Prickett');

  // Get the value held at key "myname" and log it.
  const value = await redisClient.get('myname');
  console.log(value);

  // Disconnect from Redis.
  redisClient.quit();
};

redisDemo();

ioredis wraps each Redis command in a function that can either accept a callback or return a Promise. Here, I'm using async/await to wait for each command to be executed on the Redis server before moving on to the next.

Running this code displays the value that's now stored in Redis:

$ node basic_set_get.js
Simon Prickett

External Resources

The following additional resources can help you understand how to access Redis from a Node.js application:

  • ioredis: Home page for the ioredis client.
  • node_redis: Home page for the node_redis client.
  • RU102JS, Redis for JavaScript Developers: A free online course at Redis University that provides a deep dive into Redis for Node.js applications. You can expect to learn how to make connections to Redis, store and retrieve data, and leverage essential Redis features such as sorted sets and streams.
  • Redis clients by programming language: A large list of Redis clients at redis.io.

In this video, I take a look at how to get up and running with the ioredis client:


Preventing Duplicate Checkins with Redis


Author: Simon Prickett, Principal Developer Advocate at Redis

As our application grows in popularity, we're getting more and more checkins from our expanding user base. We've decided that we want to limit this a bit, and only allow each user to give each location a particular star rating once. For example, if user 100 checks in at location 73 and rates it 3 stars, we want to reject any further 3 star checkins from them at that location.

In order to do this, we need a way of remembering each checkin, and to quickly determine if we've seen it before. We can't do this by querying the data in our checkins stream, as streams don't allow that sort of access and are periodically trimmed, removing older checkins that have been processed and are no longer needed.

We can represent our checkins in the form <userId>:<locationId>:<rating>. With this schema, the string 100:73:3 would represent user 100's checkin at location 73 with a 3 star rating.

We then need to remember each checkin, so that we can ensure it's a unique combination of user ID, location ID, and star rating. We could use a Redis Set for this. Sets are great whenever we want to maintain a collection of unique members, as they don't allow duplicates. With a Redis Set, adding a new member and checking whether a member is in the Set are both O(1) operations, meaning their performance doesn't degrade as the Set grows.
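A sketch of that Set-based approach with ioredis (the Set's key name is assumed):

// SADD returns 1 if the checkin was new, 0 if the Set already contained it.
const isNew = await redisClient.sadd('ncc:checkinset', '100:73:3');
if (isNew === 0) {
  // Duplicate checkin: reject it.
}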

However, every new member of a Set that we add (in our case these are unique checkins) causes the Set to take up more memory on the Redis server. And this growth will become an issue as we continue to receive more and more checkins.

But what if there was a way to check if a potential new member was already in a set that didn't have this memory consumption issue? A Bloom Filter is a space-efficient probabilistic data structure that can help here. Bloom Filters store hashed representations of the members of a Set rather than the actual member data itself. Unlike a Set, we can't get members back out of the Bloom Filter, but we can test if something is already in there or not... with some false positives due to hash collisions. When asked if something is a member of a Set, the Bloom Filter can tell us "no it isn't", or "it's likely that it is".

This hashing approach sacrifices the 100% accuracy we'd get with a Set in order to dramatically reduce the memory overhead. Bloom Filters can be configured with a desired acceptable error rate, so for our application this seems like a good way to enforce our "no duplicate checkins" rule without a runaway memory consumption problem. Whenever the Bloom Filter tells us it has probably seen a checkin before, it will usually be correct; we'll accept that we may occasionally disallow a checkin that we actually haven't seen before as a sensible tradeoff for keeping our memory usage under control.

Redis Stack provides a Bloom Filter implementation for Redis, along with other useful probabilistic data structures. In the video, you'll see how easy this is to use in a Node.js application, with no math skills required!
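A minimal sketch with ioredis, using the ncc:checkinfilter key you'll query in the exercise below (error handling omitted):

// BF.ADD returns 1 if the item was (probably) new, or 0 if the filter
// has likely seen it before - in which case we reject the checkin.
const isNew = await redisClient.call('BF.ADD', 'ncc:checkinfilter', '100:73:3');
if (isNew === 0) {
  // Probably a duplicate checkin: reject it.
}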

Hands-on Exercise

In this exercise, you'll see the Bloom filter in action by attempting to submit the same checkin to the system more than once.

You'll need to be running the Checkin Receiver Service... stop it with Ctrl-C if it's still running from a previous exercise. Then, restart it using the following command. This command will disable the login requirement which we don't want for this exercise:

$ npm run checkinreceiver

> js-crash-course@0.0.1 checkinreceiver
> node ./src/checkinreceiver.js

info: Authentication disabled, checkins do not require a valid user session.
info: Checkin receiver listening on port 8082.

Now, open Postman and create a new request, selecting "POST" as the HTTP verb.

  • Set the URL to localhost:8082/api/checkin
  • In the "Body" tab, set the type dropdowns to "raw" and "JSON"
  • In the body text area, enter the following JSON:
{ "userId": 100, "locationId": 73, "starRating": 3 }

Your request should look like this:

Checkin Request

Click "Send" to submit your checkin to the Checkin Receiver, which should respond with a 202 Accepted status and empty response body:

202 Checkin Response

Click "Send" a second time and you should receive a 422 Unprocessable Entity response from the Checkin Receiver along with an error message:

422 Checkin Response

With the Checkin Receiver service still running, start the Checkin Generator utility that generates random checkins:

node-js-crash-course $ npm run checkingenerator

> js-crash-course@0.0.1 checkingenerator
> node ./src/checkingenerator.js

info: Started checkin generator.

Leave the Checkin Generator running. It will generate a new random checkin every few seconds. Let it run and generate a few hundred checkins. While it's doing that, periodically monitor the memory usage required by the Bloom Filter using redis-cli or the CLI tab in RedisInsight:

127.0.0.1:6379> bf.info ncc:checkinfilter
1) Capacity
2) (integer) 1000000
3) Size
4) (integer) 2576760
5) Number of filters
6) (integer) 1
7) Number of items inserted
8) (integer) 269
9) Expansion rate
10) (integer) 2

Run this a few times as more checkins are generated, and note that the size required to store the Bloom Filter doesn't increase as the number of items inserted increases. While sacrificing some accuracy, Bloom Filters are a storage efficient solution for this type of use case.

External Resources

In this video, Guy Royse explains what Bloom Filters are and how to use them in Redis:

Redis Sets are a powerful data type to know about, learn more with Andrew's two videos on the Redis University YouTube channel. First, Redis Sets Explained:

Followed by Redis Sets Elaborated:


Indexing and Querying with Redis


Author: Simon Prickett, Principal Developer Advocate at Redis

We chose to store our user and location data in Redis Hashes. Hashes are a great fit for storing domain objects. Recall that we've chosen to store each user in a Hash whose key contains the user ID. For example, here's user 852 as seen in RedisInsight:

User 852 viewed with RedisInsight

If you're using redis-cli, you can look at user 852 with the HGETALL command:

127.0.0.1:6379> hgetall ncc:users:852
1) "id"
2) "852"
3) "firstName"
4) "Dominik"
5) "lastName"
6) "Schiffmann"
7) "email"
8) "dominik.schiffmann@example.com"
9) "password"
10) "$2b$05$xbkSwODz1tWqdE7xWb393eiYIQcdiEdbbvhK88.Xr9sW7WxdI26qi"
11) "numCheckins"
12) "9353"
13) "lastCheckin"
14) "1488517098363"
15) "lastSeenAt"
16) "124"

Storing data in Hashes means that we can easily and efficiently retrieve the contents of the Hash, provided that we know the key. So it's trivial to look up user 852, but how can we perform any of the following operations?

  • Get the user whose email address is dominik.schiffmann@example.com.
  • Find all users that were last seen at location 124.
  • Find all the users who have between 1000 and 3000 checkins.
  • Find all locations within a 10 mile radius of a given latitude / longitude coordinate and which have at least a 3 star rating.

Redis is a key/value database. This means that its data model is optimized for retrieval by key. The queries above can't be resolved by knowing just the Hash key - we need some other mechanism to index our data.

Traditionally in a key/value database, this has meant adding code to create and manually update indexes. For example to resolve the query "which user has the email address dominik.schiffmann@example.com", we might create a new String key containing that email address, with the value being the user's ID:

127.0.0.1:6379> set ncc:users:byemail:dominik.schiffmann@example.com 852
OK

Now, if we want to get Dominik's user details given only his email address, we have a two step process to follow:

  1. Look up the user ID for the user associated with the email address we have.
  2. Use that user ID to retrieve the values from the user's Hash.
127.0.0.1:6379> get ncc:users:byemail:dominik.schiffmann@example.com
"852"
127.0.0.1:6379> hgetall ncc:users:852
1) "id"
2) "852"
3) "firstName"
4) "Dominik"
5) "lastName"
6) "Schiffmann"
7) "email"
8) "dominik.schiffmann@example.com"
9) "password"
10) "$2b$05$xbkSwODz1tWqdE7xWb393eiYIQcdiEdbbvhK88.Xr9sW7WxdI26qi"
11) "numCheckins"
12) "9353"
13) "lastCheckin"
14) "1488517098363"
15) "lastSeenAt"
16) "124"

We'd also need to keep this information up to date and in sync with changes to the Hash at ncc:users:852 ourselves in our application code.

Other sorts of secondary indexes can be created using other Redis data types. For example, we might use a Redis Sorted Set as a secondary index, allowing us to perform range queries such as "Find all the users who have between 1000 and 3000 checkins". Again, we'd have to populate and maintain this extra data structure ourselves in the application code.
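Here's a sketch of that manual approach (the Sorted Set key name is assumed), using each user's numCheckins value as the score:

// Index user 852 by their checkin count...
await redisClient.zadd('ncc:users:bycheckincount', 9353, '852');

// ...then find the IDs of users with between 1000 and 3000 checkins.
const userIds = await redisClient.zrangebyscore(
  'ncc:users:bycheckincount', 1000, 3000,
);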

Redis Stack solves all of these problems for us and more. It adds an indexing, querying and full-text search engine to Redis that automatically keeps track of changes to data in indexed Hashes. Redis Stack provides a flexible query language to answer questions such as "Find me all the gyms with at least a 3 star rating and more than 200 checkins within 10 miles of Oakland, California" without adding code to build or maintain secondary data structures in our application.
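For a flavor of what this looks like (the schema shown is illustrative and may differ from the one the course's data loader creates), an index over the user Hashes can be created once and then queried freely:

// Index all Hashes whose keys begin with ncc:users:
await redisClient.call(
  'FT.CREATE', 'ncc:usersidx', 'ON', 'HASH', 'PREFIX', '1', 'ncc:users:',
  'SCHEMA', 'email', 'TAG', 'numCheckins', 'NUMERIC', 'SORTABLE',
  'lastSeenAt', 'NUMERIC',
);

// Find users with between 1000 and 3000 checkins; Redis keeps the
// index in sync as the underlying Hashes change.
const results = await redisClient.call(
  'FT.SEARCH', 'ncc:usersidx', '@numCheckins:[1000 3000]',
);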

Watch the video to see how Redis Stack is used in our example Node.js application.

Coding Exercise

In this exercise, you'll finish implementing a route that uses Redis to return all users whose last checkin was at a given location.

Open the node-js-crash-course folder with your IDE, and find the file src/routes/user_routes.js.

In this file, you'll see a partly implemented route /users/at/:locationId. To complete this exercise, you'll need to replace this line:

const searchResults = await redis.performSearch(
  redis.getKeyName('usersidx'),
  'TODO... YOUR QUERY HERE',
);

with one containing the correct query to return users whose "lastSeenAt" field is set to the value of locationId. You'll need to use the "numeric range" syntax for this, as the "lastSeenAt" field was indexed as a number. Be sure to check out the Query Syntax documentation for Redis to get help with this.

To try your code, ensure that the API Server component is running:

$ npm run dev

(remember, this will use nodemon to restart the server any time you save a code change).

Then, point your browser at http://localhost:8081/api/users/at/33. If your query is correct, you should see output similar to the following (actual users may differ, just ensure that the value of lastSeenAt for each matches the location ID you provided - 33 in this case):

[
  {
    "id": "238",
    "firstName": "Jonas",
    "lastName": "Nielsen",
    "numCheckins": "7149",
    "lastCheckin": "1515248028256",
    "lastSeenAt": "33"
  },
  {
    "id": "324",
    "firstName": "Frans",
    "lastName": "Potze",
    "numCheckins": "8623",
    "lastCheckin": "1515976232073",
    "lastSeenAt": "33"
  },
  ...
]

To help you develop your query, use one of the guides in RedisInsight workbench, or read more about the FT.SEARCH command.

External Resources

Querying, Index, and Full-Text Search in Redis:

Finding Bigfoot RESTfuly with Express + Redis Stack:

Other resources:


Managing Document Data with JSON


Author: Simon Prickett, Principal Developer Advocate at Redis

We used Redis' built-in Hash data type to represent our user and location entities. Hashes are great for this, but they are limited in that they can only contain flat name/value pairs. For our locations, we want to store extra details in a more structured way.

Here's an example of the additional data we want to store about a location:

{
  "id": 121,
  "hours": [
    { "day": "Monday", "hours": "6-7" },
    { "day": "Tuesday", "hours": "6-7" },
    { "day": "Wednesday", "hours": "7-8" },
    { "day": "Thursday", "hours": "6-9" },
    { "day": "Friday", "hours": "8-5" },
    { "day": "Saturday", "hours": "9-6" },
    { "day": "Sunday", "hours": "6-4" }
  ],
  "socials": [
    {
      "instagram": "theginclub",
      "facebook": "theginclub",
      "twitter": "theginclub"
    }
  ],
  "website": "www.theginclub.com",
  "description": "Lorem ipsum...",
  "phone": "(318) 251-0608"
}

We could store this data as serialized JSON in a Redis String, but then our application would have to retrieve and parse the entire document every time it wanted to read some of the data. And we'd have to do the same to update it too. Furthermore, with this approach, update operations aren't atomic and a second client could update the JSON stored at a given key while we're making changes to it in our application code. Then, when we serialize our version of the JSON back into the Redis String, the other client's changes would be lost.

Redis Stack adds a new JSON data type to Redis, and a query syntax for selecting and updating individual elements in a JSON document atomically on the Redis server. This makes our application code simpler, more efficient, and much more reliable.
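For example (using the location details key you'll meet in the exercise below), a single field can be read or updated on the server without touching the rest of the document:

// Read just the phone number from the stored document...
const phone = await redisClient.call(
  'JSON.GET', 'ncc:locationdetails:121', '$.phone',
);

// ...and update the website atomically, leaving all other fields untouched.
// Note that the new value must itself be valid JSON, hence the extra quotes.
await redisClient.call(
  'JSON.SET', 'ncc:locationdetails:121', '$.website', '"www.theginclub.com"',
);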

Coding Exercise

In this exercise, you'll complete the code for an API route that gets just the object representing a location's opening hours for a given day. Open the file src/routes/location_routes.js, and find the route for /location/:locationId/hours/:day. The starter code looks like this:

// EXERCISE: Get opening hours for a given day.
router.get(
  '/location/:locationId/hours/:day',
  [
    param('locationId').isInt({ min: 1 }),
    param('day').isInt({ min: 0, max: 6 }),
    apiErrorReporter,
  ],
  async (req, res) => {
    /* eslint-disable no-unused-vars */
    const { locationId, day } = req.params;
    /* eslint-enable */
    const locationDetailsKey = redis.getKeyName('locationdetails', locationId);

    // TODO: Get the opening hours for a given day from
    // the JSON stored at the key held in locationDetailsKey.
    // You will need to provide the correct JSON path to the hours
    // array and return the element held in the position specified by
    // the day variable. Make sure Redis JSON returns only the day
    // requested!
    const jsonPath = 'TODO';

    /* eslint-enable no-unused-vars */
    const hoursForDay = JSON.parse(
      await redisClient.call('JSON.GET', locationDetailsKey, jsonPath),
    );
    /* eslint-disable */

    // If null response, return empty object.
    res.status(200).json(hoursForDay || {});
  },
);

You'll need to update the code to provide the correct JSON path, replacing the "TODO" value with a JSON path expression.

Looking at the JSON stored at key ncc:locationdetails:121, we see that the opening hours are stored as an array of objects in a field named hours, where day 0 is Monday and day 6 is Sunday:

Location Details in RedisInsight

So you'll need a JSON path query that gets the right element from the hours array depending on the value stored in the variable day.

If you're using redis-cli, you can look at the structure of the JSON document with the following command:

json.get ncc:locationdetails:121 .

Make sure your query returns only the day requested, so that you don't have to write Node.js code to filter the value returned from Redis. Use the JSON path syntax page to help you formulate the right query.

To test your code, start the server with:

$ npm run dev

Recall that this will allow you to edit the code and try your changes without restarting the server.

If you have the correct JSON path in your code, visiting http://localhost:8081/api/location/121/hours/2 should return:

{
  "day": "Wednesday",
  "hours": "7-8"
}

Don't forget that if you have questions or need help, we're available on Discord.

External Resources

In this video, Justin introduces JSON using a fun taco truck example!

Learn more about JSON at https://redis.io/docs/stack/json/.


Up and Running with the Sample Application


Author: Simon Prickett, Principal Developer Advocate at Redis

Let's get hands on, clone the application repository from GitHub, start up Redis in a Docker container, and load the sample data!

Reminder - Software Prerequisites

To get the most from this course, you'll need a machine that can run the application and the Redis server. The application runs directly on your machine and the Redis server runs in a Docker container.

You'll need the following installed on your machine:

  • Docker (you'll need the docker-compose command)
  • Node.js (use the current Long Term Stable - LTS - version)
  • git command line tools
  • Your favorite IDE (we like VSCode, but anything you're comfortable with works)
  • Postman - we're going to make some API calls and Postman makes that easy.

Setup / Installation Process

Get the Code and Install Dependencies

Clone the course repo from GitHub and install the dependencies:

$ git clone https://github.com/redislabs-training/node-js-crash-course.git
$ cd node-js-crash-course
$ npm install

Start Redis (Docker)

From the node-js-crash-course directory, start Redis using docker-compose:

$ docker-compose up -d
Creating network "node-js-crash-course_default" with the default driver
Creating rediscrashcourse ... done
$ docker ps

The output from the docker ps command should show one container running, using the "redis/redis-stack" image. This container runs Redis with the Search, JSON, Time Series, and Probabilistic data structures.

Load the Sample Data into Redis

Load the course example data using the provided data loader. This is a Node.js application:

$ npm run load all
> node src/utils/dataloader.js -- "all"

Loading user data...
User data loaded with 0 errors.
Loading location data...
Location data loaded with 0 errors.
Loading location details...
Location detail data loaded with 0 errors.
Loading checkin stream entries...
Loaded 5000 checkin stream entries.
Creating consumer group...
Consumer group created.
Dropping any existing indexes, creating new indexes...
Created indexes.
Deleting any previous bloom filter, creating new bloom filter...
Created bloom filter.

In another terminal window, run the redis-cli executable that's in the Docker container. Then, enter the Redis commands shown at the redis-cli prompt to verify that data loaded successfully:

$ docker exec -it rediscrashcourse redis-cli
127.0.0.1:6379> hgetall ncc:locations:106
1) "id"
2) "106"
3) "name"
4) "Viva Bubble Tea"
5) "category"
6) "cafe"
7) "location"
8) "-122.268645,37.764288"
9) "numCheckins"
10) "886"
11) "numStars"
12) "1073"
13) "averageStars"
14) "1"
127.0.0.1:6379> hgetall ncc:users:12
1) "id"
2) "12"
3) "firstName"
4) "Franziska"
5) "lastName"
6) "Sieben"
7) "email"
8) "franziska.sieben@example.com"
9) "password"
10) "$2b$05$uV38PUcdFD3Gm6ElMlBkE.lzZutqWVE6R6ro48GsEjcmnioaZZ55C"
11) "numCheckins"
12) "8945"
13) "lastCheckin"
14) "1490641385511"
15) "lastSeenAt"
16) "22"
127.0.0.1:6379> xlen ncc:checkins
(integer) 5000

Start and Configure RedisInsight

If you're using RedisInsight, start it up and it should open in your browser automatically. If not, point your browser at http://localhost:8001.

If this is your first time using RedisInsight click "I already have a database".

If you already have other Redis databases configured in RedisInsight, click "Add Redis Database".

Now, click "Connect to a Redis Database Using hostname and port". Configure the database details as shown below, then click "Add Redis Database".

Configuring RedisInsight

You should now be able to browse your Redis instance. If you need more guidance on how to connect to Redis from RedisInsight, check out Justin's video below but be sure to use 127.0.0.1 as the host, 6379 as the port and leave the username and password fields blank when configuring your database.

Start the Application

Now it's time to start the API Server component of the application and make sure it connects to Redis. This component listens on port 8081.

If port 8081 is in use on your system, edit this section of the config.json file and pick another available port:

"application": {
"port": 8081
},

Start the server like this:

$ npm run dev

> ./node_modules/nodemon/bin/nodemon.js

[nodemon] 2.0.7
[nodemon] to restart at any time, enter `rs`
[nodemon] watching path(s): *.*
[nodemon] watching extensions: js,mjs,json
[nodemon] starting `node src/server.js`
Warning: Environment variable WEATHER_API_KEY is not set!
info: Application listening on port 8081.

This starts the application using nodemon, which monitors for changes in the source code and will restart the server when a change is detected. This will be useful in the next module where you'll be making some code changes.

Ignore the warning about WEATHER_API_KEY — we'll address this in a later exercise when we look at using Redis as a cache.

To verify that the server is running correctly and connected to Redis, point your browser at:

http://localhost:8081/api/location/200

You should see the summary information for location 200, Katia's Kitchen:

{
  "id": "200",
  "name": "Katia's Kitchen",
  "category": "restaurant",
  "location": "-122.2349598,37.7356811",
  "numCheckins": "359",
  "numStars": "1021",
  "averageStars": "3"
}

Great! Now you're up and running. Let's move on to the next module and see how we're using Redis Hashes in the application. You'll also get to write some code!

Stopping redis-cli, the Redis Container and the Application

Don't do this now, as we’ve only just started! However, when you do want to shut everything down, here's how to do it...

To stop running redis-cli, simply enter the quit command at the redis-cli prompt:

127.0.0.1:6379> quit
$

To stop the Redis Server, make sure you are in the node-js-crash-course folder that you checked the application repo out to, then:

$ docker-compose down
Stopping rediscrashcourse ... done
Removing rediscrashcourse ... done
Removing network node-js-crash-course_default

Redis persists data to the "redisdata" folder. If you want to remove this, just delete it:

$ rm -rf redisdata

To stop each of the application's components, press Ctrl+C in the terminal window that the component is running in. For example, to stop the API server:

$ npm run dev

> ./node_modules/nodemon/bin/nodemon.js

[nodemon] 2.0.7
[nodemon] to restart at any time, enter `rs`
[nodemon] watching path(s): *.*
[nodemon] watching extensions: js,mjs,json
[nodemon] starting `node src/server.js`
info: Application listening on port 8081.
^C
node-js-crash-course $

Sample Application Overview


Author: Simon Prickett, Principal Developer Advocate at Redis

In this course, we'll look at how to use Redis as a data store and cache in the context of a sample application. Imagine that we're building a sort of social network application where users can "check in" at different locations and give them a star rating… from 0 for an awful experience through 5 to report that they had the best time ever there!

When designing our application, we determined that there's a need to manage data about three main entities:

  • Users
  • Locations
  • Checkins

Let's look at what we're storing about each of these entities. As we're using Redis as our only data store, we'll also consider how they map to Redis data types...

Users

We'll represent each user as a flat map of name/value pairs with no nested objects. As we'll see later on, this maps nicely to a Redis Hash. Here's a JSON representation of the schema we'll use to represent each user:

{
  "id": 99,
  "firstName": "Isabella",
  "lastName": "Pedersen",
  "email": "isabella.pedersen@example.com",
  "password": "xxxxxx1",
  "numCheckins": 8073,
  "lastCheckin": 1544372326893,
  "lastSeenAt": 138
}

We've given each user an ID and we're storing basic information about them. Also, we'll hash their password using bcrypt when we load the sample data into Redis.

For each user, we'll keep track of the total number of checkins that they've submitted to the system, and the timestamp and location ID of their most recent checkin so that we know where and when they last used the system.
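
As a quick illustration of that mapping (not a command you need to run yet), a user like this could be stored with HSET; the key name ncc:user:99 follows the ncc:user:<id> convention that we'll encounter later in the course:

127.0.0.1:6379> hset ncc:user:99 firstName "Isabella" lastName "Pedersen" numCheckins 8073
(integer) 3
127.0.0.1:6379> hget ncc:user:99 firstName
"Isabella"

Each field of the Hash corresponds to one of the name/value pairs in the JSON representation above.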

Locations

For each location that users can check in at, we're going to maintain two types of data. The first of these is also a flat map of name/value pairs, containing summary information about the location:

{
  "id": 138,
  "name": "Stacey's Country Bakehouse",
  "category": "restaurant",
  "location": "-122.195447,37.774636",
  "numCheckins": 170,
  "numStars": 724,
  "averageStars": 4
}

We've given each location an ID and a category—we'll use the category to search for locations by type later on. The "location" field stores the coordinates in longitude, latitude format… this is the opposite of the usual latitude, longitude format. We'll see how to use this to perform geospatial searches later when we look at Redis Search.

For each location, we're also storing the total number of checkins that have been recorded there by all of our users, the total number of stars that those checkins gave the location, and an average star rating per checkin for the location.

The second type of data that we want to maintain for each location is what we'll call "location details". These take the form of more structured JSON documents with nested objects and arrays. Here's an example for location 138, Stacey's Country Bakehouse:

{
  "id": 138,
  "hours": [
    { "day": "Monday", "hours": "8-7" },
    { "day": "Tuesday", "hours": "9-7" },
    { "day": "Wednesday", "hours": "6-8" },
    { "day": "Thursday", "hours": "6-6" },
    { "day": "Friday", "hours": "9-5" },
    { "day": "Saturday", "hours": "8-9" },
    { "day": "Sunday", "hours": "7-7" }
  ],
  "socials": [
    {
      "instagram": "staceyscountrybakehouse",
      "facebook": "staceyscountrybakehouse",
      "twitter": "staceyscountrybakehouse"
    }
  ],
  "website": "www.staceyscountrybakehouse.com",
  "description": "Lorem ipsum....",
  "phone": "(316) 157-8620"
}

We want to build an API that allows us to retrieve all or some of these extra details, and keep the overall structure of the document intact. For that, we'll need Redis with JSON support as we'll see later.
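
To give a flavor of why that matters, a document store lets us fetch just a fragment of a stored document by path rather than the whole thing. Here's a sketch of what a JSONPath query looks like; the key name ncc:locationdetails:138 is an assumption for illustration, and the course reveals the real key names later:

127.0.0.1:6379> json.get ncc:locationdetails:138 $.website
"[\"www.staceyscountrybakehouse.com\"]"

Note that results for $-style paths come back wrapped in a JSON array.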

Checkins

Checkins differ from users and locations in that they're not entities that we need to store forever. In our application, checkins consist of a user ID, a location ID, a star rating and a timestamp - we'll use these values to update attributes of our users and locations.

Each checkin can be thought of as a flat map of name/value pairs, for example:

{
  "userId": 789,
  "locationId": 171,
  "starRating": 5
}

Here, we see that user 789 visited location 171 ("Hair by Parvinder") and was really impressed with the service.

We need a way to store checkins for long enough to process them, but not forever. We also need to associate a timestamp with each one, as we'll need that when we process the data.

Redis provides a Stream data type that's perfect for this - with Redis Streams, we can store maps of name/value pairs and have the Redis server timestamp them for us. Streams are also perfect for the sort of asynchronous processing we want to do with this data. When a user posts a new checkin to our API we want to store that data and respond to the user that we've received it as quickly as possible. Later we can have one or more other parts of the system do further processing with it. Such processing might include updating the total number of checkins and last seen at fields for a user, or calculating a new average star rating for a location.
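
As a sketch of what that looks like from Node.js with ioredis (illustrative only - the stream key ncc:checkins is an assumption here, and the real Checkin Receiver code may differ):

const Redis = require('ioredis');
const redis = new Redis(); // connects to redis://localhost:6379 by default

const submitCheckin = async (checkin) => {
  // '*' asks the Redis server to generate the entry ID, which embeds
  // a server-side timestamp, so we don't supply one ourselves.
  return redis.xadd(
    'ncc:checkins', '*',
    'userId', checkin.userId,
    'locationId', checkin.locationId,
    'starRating', checkin.starRating,
  );
};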

Application Architecture

We decided to use Node.js with the Express framework and the ioredis client to build the application. Rather than have a monolithic codebase, the application has been split into four components or services. These are:

  • Authentication Service: Listens on an HTTP port and handles user authentication using Redis as a shared session store that other services can access.
  • Checkin Receiver: Listens on an HTTP port and receives checkins as HTTP POST requests from our users. Each checkin is placed in a Redis Stream for later processing.
  • Checkin Processor: Monitors the checkin Stream in Redis, updating user and location information as it processes each checkin.
  • API Server: Implements the bulk of the application's API endpoints, including those to retrieve information about users and locations from Redis.

These components fit together like so:

Application Architecture

There's also a data loader component, which we'll use to load some initial sample data into the system.

As we progress through the course, we'll look at each of these components in turn. In the next module, you'll get hands on and clone the application repo, start a Redis server with Docker, and load the sample data.

External Resources


Scaling an Express Application with Redis as a Session Store


Profile picture for Simon Prickett
Author:
Simon Prickett, Principal Developer Advocate at Redis

We're building our application as a series of components, with Redis as a data store and cache. Most of these components talk to the user via the HTTP request / response cycle, which is inherently stateless. However, we want to maintain state, or remember things about the user from one request to the next in some circumstances.

We can identify users by having the application set a cookie in the response that is returned to the user - their browser will then attach this cookie to future requests. This can then be used to store information about the user in a server-side session object that Express keeps track of by means of the ID in the cookie.

You can think of the cookie as a sort of key, and the session object as a value. We could store these in memory in our server, but this approach limits us to a single server instance that would lose all of its session data if it were to crash and restart.

Fortunately, Redis makes an excellent store for session data - it's fast and durable, and allows us to scale system components horizontally by adding more instances of them. We've used the npm package "connect-redis" to add Redis as a session store for the Authentication and Checkin Receiver services, with minimal code required.
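
For a sense of just how minimal, here's a sketch of the sort of wiring involved; the option values are illustrative, and the connect-redis API varies a little between versions, so check the docs for the one you're using:

const session = require('express-session');
const RedisStore = require('connect-redis')(session);
const Redis = require('ioredis');

app.use(
  session({
    store: new RedisStore({
      client: new Redis(), // redis://localhost:6379
      prefix: 'ncc:session:', // assumed to match the keys we'll inspect later
    }),
    name: 'checkinapp', // the cookie name we'll see in Postman
    secret: 'replace-me-with-a-real-secret',
    resave: false,
    saveUninitialized: false,
  }),
);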

Hands-on Exercise

In this exercise, you'll enable the authentication functionality in the Checkin Receiver service, and use Postman to establish a session with the Authentication service in order to submit an authenticated checkin to the system.

First, stop the Checkin Receiver if you have it running. Press Ctrl+C in its terminal window.

Now, restart the Checkin Receiver with the optional authentication functionality enabled:

$ npm run checkinreceiver auth

> js-crash-course@0.0.1 checkinreceiver
> node ./src/checkinreceiver.js "auth"

info: Authentication enabled, checkins require a valid user session.
info: Checkin receiver listening on port 8082.

Note that the Checkin Receiver reports that authentication is now enabled.

In a second terminal window, cd to the node-js-crash-course directory that you cloned the project repo into, and start the Authentication Service which will listen on port 8083:

$ npm run auth

> js-crash-course@0.0.1 auth /Users/simonprickett/source/github/node-js-crash-course
> node ./src/auth.js

info: Authentication service listening on port 8083.

Open up a new request in Postman, and make a POST request to perform a checkin as follows:

Authenticated Checkin Attempt

Click "Send" - this time, the Checkin Receiver service should reject the request as you haven't authenticated with the system. You should see a 401 Unauthorized response:

Checkin Attempt 401 Response

And in the Checkin Receiver's terminal window, you should see that it rejected your request as it couldn't find your session:

debug: Rejecting checkin - no valid user session found.

The Checkin Receiver rejected your request because it couldn't find a value for 'user' in your request's session (check out the code in src/checkinreceiver.js).
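
The check itself can be as simple as a small Express middleware along these lines (a sketch, not the exact course code):

const requireAuthenticatedSession = (req, res, next) => {
  // connect-redis has already loaded the session for us by this point.
  if (req.session && req.session.user) {
    return next();
  }

  res.status(401).send('Unauthorized');
};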

Let's try again, but this time we'll login first. This will establish a session and we'll be able to see how that gets stored in Redis…

Keep your checkin request tab in Postman, and open a new tab to create a POST request to the Authentication service.

Set the URL to localhost:8083/login and the JSON body to:

{ "email": "al.appelhof@example.com", "password": "secret123" }

Your request should look like this:

Login Request

Click "Send" to login. The Authentication Service will check the supplied credentials against what's stored in Redis at key ncc:user:77 and create a session object for that user, adding the email address to the "user" property in the session. Postman should show a 200 (success) response code and the text "OK".

Click the blue "Cookies" text in Postman, and you should see that a cookie was set for "checkinapp" on localhost. Click on the "checkinapp" text to see the contents of the cookie:

Local Cookies Cookie Detail

Now flip back to your tab in Postman that has the checkin request for user 77. Send the request again, and this time it should succeed, as Postman will also send the cookie containing your session ID, and the Checkin Receiver will then be able to validate that you have an authenticated session. This time, you should get a 202 Accepted response:

Authenticated Checkin 202 Response

When the Checkin Receiver got your request, it used the value in your cookie to look up and load your session from Redis (connect-redis does this for us), then checked that the session had a "user" property set. Any system component that's running on a localhost URL can now access your session and query and store information in it.
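
Working with the session from code is just property access on req.session - connect-redis persists any changes to Redis for us. For example, a hypothetical endpoint could count a user's visits like this (an illustrative sketch, not part of the course code):

app.get('/visits', (req, res) => {
  // Any JSON-serializable value can be stored in the session.
  req.session.visits = (req.session.visits || 0) + 1;
  res.send({ visits: req.session.visits });
});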

Finally, let's see what the session looks like in Redis... using redis-cli or the RedisInsight browser, find all the session keys like this:

127.0.0.1:6379> keys ncc:session:*
1) "ncc:session:Blvc-93k2TckafgwS0IDAHfW-MPGhqyl"

Note that the key name contains the session ID that was in your cookie. connect-redis manages these keys for us as Redis Strings, all our application needs to do is manipulate the request's session object.

Let's see what's in our session and when it expires:

127.0.0.1:6379> get ncc:session:Blvc-93k2TckafgwS0IDAHfW-MPGhqyl
"{\"cookie\":{\"originalMaxAge\":null,\"expires\":null,\"httpOnly\":true,\"path\":\"/\"},\"user\":\"al.appelhof@example.com\"}"
127.0.0.1:6379> ttl ncc:session:Blvc-93k2TckafgwS0IDAHfW-MPGhqyl
(integer) 85693

We didn't specify a session length in our code when setting up connect-redis, so it defaults to expiring sessions after a day (86,400 seconds, which is why the TTL above shows a little under that).
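
If you wanted shorter-lived sessions, you could pass a cookie maxAge when configuring express-session, for example (illustrative values):

app.use(
  session({
    // ...store, name and secret as before...
    cookie: {
      maxAge: 1000 * 60 * 30, // 30 minutes, expressed in milliseconds
    },
  }),
);

connect-redis uses this value to set a matching TTL on the session key in Redis.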

Finally, let's logout from the Authentication Service and ensure that our session data gets removed from Redis. In Postman, create a GET request as follows and click Send:

Logout Request

The code in the logout handler destroys your session, which should then disappear from Redis:

127.0.0.1:6379> get ncc:session:Blvc-93k2TckafgwS0IDAHfW-MPGhqyl
(nil)

Next, we'll move on to look at how to scale up our checkin processing to cope with all these new users!

External Resources


Welcome to the Node.js Crash Course


Profile picture for Simon Prickett
Author:
Simon Prickett, Principal Developer Advocate at Redis

Welcome to the Node.js Redis Crash Course. I'm Simon Prickett, Principal Developer Advocate at Redis.

In this course, you'll learn about using Redis with Node.js through a blend of video and text-based training. You can also get hands-on with some optional workshop exercises where you'll add new functionality to an existing Node.js application.

Learning Objectives

By following the materials in this course, you can expect to learn about:

  • What Redis is and where to find it.
  • The benefits of adding Redis to your stack, when and how to use it.
  • How to leverage Redis in the context of a modern Node.js Express application with the ioredis client.
  • Using RedisInsight to browse and manage a Redis instance.
  • Modeling domain objects with Redis Hashes, querying them with Redis Search.
  • Storing and querying document data with Redis JSON.
  • Asynchronous messaging and fast data ingestion with Redis Streams.
  • Speeding up response times with Redis caching and Express middleware.
  • Scaling your application using Redis as a session store for Express.

Software Prerequisites

To get the most from this course, you'll need a machine that can run Node.js applications, plus Docker as we'll be using a container that includes Redis and the required add-on modules for this course.

You'll need the following installed on your machine:

  • Docker (you'll need the docker-compose command)
  • Node.js (use the current Long Term Support - LTS - version)
  • git command line tools
  • Your favorite IDE (we like VSCode, but anything you're comfortable with works)
  • Postman - we're going to make some API calls and Postman makes that easy.

Let's Learn Together

Sign up for the Redis Discord where you can ask me anything about the course!


What is Redis?: An Overview


Profile picture for Simon Prickett
Author:
Simon Prickett, Principal Developer Advocate at Redis

Redis is an open source data structure server. It belongs to the class of NoSQL databases known as key/value stores. Keys are unique identifiers, whose value can be one of the data types that Redis supports. These data types range from simple Strings to Linked Lists, Sets, and even Streams. Each data type has its own set of behaviours and commands associated with it.

For example, I can store my name in a Redis String and associate it with the key "myname" using a Redis SET command. I can then retrieve the value using a Redis GET command. Here's how that looks using redis-cli, a command line interface to Redis:

127.0.0.1:6379> set myname "Simon Prickett"
OK
127.0.0.1:6379> get myname
"Simon Prickett"

Keys in a Redis database are distributed in a flat keyspace. Redis does not enforce a schema or naming policy for keys. This provides great flexibility, with the organization of the keyspace being the responsibility of the developer. We'll look at ways of managing this later in the course.
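
A common convention, which you'll see used in this course, is to build key names from colon-separated parts that describe what the key holds, for example (an illustrative key, not one from the course data):

127.0.0.1:6379> set myapp:user:42:name "Simon"
OK
127.0.0.1:6379> get myapp:user:42:name
"Simon"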

Redis is famous for being an extremely fast database. This speed comes from the fact that it stores and serves all data from memory rather than disk. Redis is durable, so your data will be persisted, but all reads will be from a copy of the data held in memory. This makes Redis an excellent choice for applications that require real time data access.

Redis is also often used as a cache, and has specific functionality to support this. Redis can be extended with new functionality using plugin modules. We'll see how to use some of these as we make our way through the course.

External Resources

Here are some resources that we think will be useful to you as you discover Redis:

  • redis.io - the official website of open source Redis.
  • Redis Enterprise Cloud - a fully managed cloud service from Redis - you can try it out using the full featured free tier.
  • The official Redis Docker image.
  • For a comprehensive introduction to Redis, we recommend taking a look at the RU101, Introduction to Redis Data Structures course at Redis University. In this free online course, you’ll learn about the data structures in Redis, and you’ll see how to practically apply them in the real world.

Up and Running with Express and Redis OM for Node.js in 5-minutes


Profile picture for Guy Royse
Author:
Guy Royse, Senior Developer Advocate at Redis

OK. So that title is a bold claim. And this is a read-and-follow-along sort of tutorial. So, it might be 6 minutes or 4 minutes depending on how fast you type. Regardless, this should get you building something useful quickly and could make a nice foundation for something bigger.

Oh, and you might be wondering what Redis OM is. Well, there's an extensive README on GitHub. Go check it out!

Also, this document, the code that we're about to implement, and the data needed to test it are all out on GitHub. Refer to them as you need.

Let's Build Something

So, what are we going to build? We're going to build a RESTful service that lets you manage songs. It'll let you do all the CRUD things (that's create, read, update, and delete for the uninitiated) with songs. Plus, we'll add some cool search endpoints to the service as well. That way, we can find songs by an artist or genre, from a particular year, or with certain lyrics.

Test data for this problem was a little tricky. Most song lyrics are copyrighted and getting permission to use them for a little old tutorial wasn't really an option. And we definitely want to be able to search on song lyrics. How else are we going to find that song that goes "oooo ah ah ah ah"?

Fortunately, my buddy Dylan Beattie is literally the original Rockstar developer. In addition to coding cool things, he writes parody songs with tech themes. And, he has given me permission to use them as test data.

Humble Beginnings

We're using Redis as our database—that's the whole idea behind Redis OM. So, you'll need some Redis, specifically with Search and JSON installed. The easiest way to do this is to set up a free Redis Cloud instance. But, you can also use Docker:

$ docker run -p 6379:6379 redis/redis-stack:latest

I'm assuming you are relatively Node.js savvy so you should be able to get that installed on your own. We'll be using the top-level await feature for modules that was introduced in Node v14.8.0 so do make sure you have that version, or a newer one. If you don't, go and get it.

Once you have that, it's time to create a project:

$ npm init

Give it a name, version, and description. Use whatever you like. I called mine "Metalpedia".

Install Express and Redis OM for Node.js:

$ npm install express redis-om --save

And, just to make our lives easy, we'll use nodemon:

$ npm install nodemon --save-dev

Now that stuff is installed, let's set up some other details in our package.json. First, set the "type" to "module", so we can use ES6 Modules:

  "type": "module",

The "test" script that npm init generates isn't super useful for us. Replace that with a "start" script that calls nodemon. This will allow the service we build to restart automatically whenever we change a file. Very convenient:

  "scripts": {
"start": "nodemon server.js"
},

I like to make my packages private, so they don't accidentally get pushed to NPM:

  "private": true,

Oh, and you don't need the "main" entry. We're not building a package to share. So go ahead and remove that.

Now, you should have a package.json that looks something like this:

{
  "name": "metalpedia",
  "version": "1.0.0",
  "description": "Sample application for building a music repository backed by Redis and Redis OM.",
  "type": "module",
  "scripts": {
    "start": "nodemon server.js"
  },
  "author": "Guy Royse <guy@guyroyse.com> (http://guyroyse.com/)",
  "license": "MIT",
  "private": true,
  "dependencies": {
    "express": "^4.17.1",
    "redis-om": "^0.2.0"
  },
  "devDependencies": {
    "nodemon": "^2.0.14"
  }
}

Excellent. Set up done. Let's write some code!

Getting the Express Service Up and Running

I like to write my services with a little version and name endpoint at the root. That way if some random developer hits the site of the service, they'll get a clue as to what it is. So let's do that:

Create a file named server.js in the root of your project folder and populate it thus:

import express from 'express';

// create an express app and use JSON
let app = new express();
app.use(express.json());

// setup the root level GET to return name and version from package.json
app.get('/', (req, res) => {
  res.send({
    name: process.env.npm_package_name,
    version: process.env.npm_package_version,
  });
});

// start listening
app.listen(8080);

We now have enough to actually run something. So let's run it:

$ npm start

Then, hit http://localhost:8080/ in your favorite browser. You should see something like this:

{
  "name": "metalpedia",
  "version": "1.0.0"
}

Or, hit your service using curl (and json_pp if you want to be fancy):

$ curl -X GET http://localhost:8080 -s | json_pp
{
"name": "metalpedia",
"version": "1.0.0"
}

Cool. Let's add some Redis.

Mapping Songs to Redis

We're going to use Redis OM to map data for a song from JSON data in Redis to JavaScript objects.

Create a file named song-repository.js in the root of your project folder. In it, import all the parts from Redis OM that you'll need:

import { Entity, Schema, Client, Repository } from 'redis-om';

Entities are the classes that you work with—the thing being mapped to. They are what you create, read, update, and delete. Any class that extends Entity is an entity. We'll define our Song entity with a single line for now, but we'll add some more to it later:

class Song extends Entity {}

Schemas define the fields on your entity, their types, and how they are mapped internally to Redis. By default, entities map to Hashes in Redis but we want ours to use JSON instead. When a Schema is created, it will add properties to the provided entity class based on the schema information provided. Here's a Schema that maps to our Song:

let schema = new Schema(Song, {
  title: { type: 'string' }, // the title of the song
  artist: { type: 'string' }, // who performed the song
  genres: { type: 'string[]' }, // array of strings for the genres of the song
  lyrics: { type: 'text' }, // the full lyrics of the song
  music: { type: 'text' }, // who wrote the music for the song
  year: { type: 'number' }, // the year the song was released
  duration: { type: 'number' }, // the duration of the song in seconds
  link: { type: 'string' }, // link to a YouTube video of the song
});

Clients are used to connect to Redis. Create a Client and pass your Redis URL in the constructor. If you don't specify a URL, it will default to redis://localhost:6379. Clients have methods to .open, .close, and .execute raw Redis commands, but we're just going to open it:

let client = await new Client().open();

Remember that top-level await stuff I mentioned at the top of the document? There it is!
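
As an aside, since Clients can .execute raw Redis commands, you can do a quick connectivity check like this if you want (an optional sketch, not needed for the tutorial; the exact return shape may vary by version):

// Should print 'PONG' if the connection is healthy.
let pong = await client.execute(['PING']);
console.log(pong);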

Now we have all the pieces that we need to create a Repository. Repositories are the main interface into Redis OM. They give us the methods to read, write, and remove entities. Create a repository—and make sure it's exported as you'll need it when we get into the Express stuff:

export let songRepository = client.fetchRepository(schema);

We're almost done with setting up our repository. But we still need to create an index or we won't be able to search on anything. We do that by calling .createIndex. If an index already exists and it's the same, this function won't do anything. If it is different, it'll drop it and create a new one. In a real environment, you'd probably want to create your index as part of CI/CD. But we'll just cram it into our main code for this example:

await songRepository.createIndex();

We have what we need to talk to Redis. Now, let's use it to make some routes in Express.

Using Redis OM for CRUD Operations

Let's create a truly RESTful API with the CRUD operations mapping to PUT, GET, POST, and DELETE respectively. We're going to do this using Express Routers as this makes our code nice and tidy. So, create a file called song-router.js in the root of your project folder. Then add the imports and create a Router:

import { Router } from 'express';
import { songRepository as repository } from './song-repository.js';

export let router = Router();

This router needs to be added in server.js under the /song path so let's do that next. Add the following line of code at the top of server.js—with all the other imports—to import the song router:

import { router as songRouter } from './song-router.js';

Also add a line of code to call .use so that the router we are about to implement is, well, used:

app.use('/song', songRouter);

Our server.js should now look like this:

import express from 'express';
import { router as songRouter } from './song-router.js';

// create an express app and use JSON
let app = new express();
app.use(express.json());

// bring in some routers
app.use('/song', songRouter);

// setup the root level GET to return name and version from package.json
app.get('/', (req, res) => {
  res.send({
    name: process.env.npm_package_name,
    version: process.env.npm_package_version,
  });
});

// start listening
app.listen(8080);

Add a Create Route

Now, let's start putting some routes in our song-router.js. We'll create a song first as you need to have songs in Redis before you can do any of the reading, updating, or deleting of them. Add the PUT route below. This route will call .createEntity to create an entity, set all the properties on the newly created entity, and then call .save to persist it:

router.put('/', async (req, res) => {
  // create the Song so we can save it
  let song = repository.createEntity();

  // set all the properties, converting missing properties to null
  song.title = req.body.title ?? null;
  song.artist = req.body.artist ?? null;
  song.genres = req.body.genres ?? null;
  song.lyrics = req.body.lyrics ?? null;
  song.music = req.body.music ?? null;
  song.year = req.body.year ?? null;
  song.duration = req.body.duration ?? null;
  song.link = req.body.link ?? null;

  // save the Song to Redis
  let id = await repository.save(song);

  // return the id of the newly created Song
  res.send({ id });
});

Now that we have a way to shove songs into Redis, let's start shoving. Out on GitHub, there are a bunch of JSON files with song data in them. (Thanks Dylan!) Go ahead and pull those down and place them in a folder under your project root called songs.

Let's use curl to load in a song. I'm partial to HTML, sung to the tune of AC/DC's Highway to Hell, so let's use that one:

$ curl -X PUT -H "Content-Type: application/json" -d "@songs/html.json" http://localhost:8080/song -s | json_pp

You should get back the ID of that newly inserted song:

{
  "id": "01FKRW9WMVXTGF71NBEM3EBRPY"
}

We're shipping HTML indeed. If you have the redis-cli handy—or want to use RedisInsight—you can take a look and see how Redis has stored this:

> json.get Song:01FKRW9WMVXTGF71NBEM3EBRPY
"{\"title\":\"HTML\",\"artist\":\"Dylan Beattie and the Linebreakers\",\"genres\":[\"blues rock\",\"hard rock\",\"parody\",\"rock\"],\"lyrics\":\"W3C, RFC, a JIRA ticket and a style guide.\\\\nI deploy with FTP, run it all on the client side\\\\nDon\xe2\x80\x99t need Ruby, don\xe2\x80\x99t need Rails,\\\\nAin\xe2\x80\x99t nothing running on my stack\\\\nI\xe2\x80\x99m hard wired, for web scale,\\\\nYeah, I\xe2\x80\x99m gonna bring the 90s back\\\\n\\\\nI\xe2\x80\x99m shipping HTML,\\\\nHTML,\\\\nI\xe2\x80\x99m shipping HTML,\\\\nHTML\xe2\x80\xa6\\\\n\\\\nNo logins, no trackers,\\\\nNo cookie banners to ignore\\\\nI ain\xe2\x80\x99t afraid of, no hackers\\\\nJust the occasional 404\\\\nThey hatin\xe2\x80\x99, what I do,\\\\nBut that\xe2\x80\x99s \xe2\x80\x98cos they don\xe2\x80\x99t understand\\\\nMark it up, break it down,\\\\nRemember to escape your ampersands\xe2\x80\xa6\\\\n\\\\nI\xe2\x80\x99m shipping HTML,\\\\nHTML,\\\\nI\xe2\x80\x99m shipping HTML,\\\\nHTML\xe2\x80\xa6\\\\n\\\\n(But it\xe2\x80\x99s really just markdown.)\",\"music\":\"\\\"Highway to Hell\\\" by AC/DC\",\"year\":2020,\"duration\":220,\"link\":\"https://www.youtube.com/watch?v=woKUEIJkwxI\"}"

Yep. Looks like JSON.

Add a Read Route

Create down, let's add a GET route to read this song over HTTP instead of using the redis-cli:

router.get('/:id', async (req, res) => {
  // fetch the Song
  let song = await repository.fetch(req.params.id);

  // return the Song we just fetched
  res.send(song);
});

Now you can use curl or your browser to load http://localhost:8080/song/01FKRW9WMVXTGF71NBEM3EBRPY to fetch the song:

$ curl -X GET http://localhost:8080/song/01FKRW9WMVXTGF71NBEM3EBRPY -s | json_pp

And you should get back the JSON for the song:

{
  "link": "https://www.youtube.com/watch?v=woKUEIJkwxI",
  "genres": ["blues rock", "hard rock", "parody", "rock"],
  "entityId": "01FKRW9WMVXTGF71NBEM3EBRPY",
  "title": "HTML",
  "lyrics": "W3C, RFC, a JIRA ticket and a style guide.\\nI deploy with FTP, run it all on the client side\\nDon’t need Ruby, don’t need Rails,\\nAin’t nothing running on my stack\\nI’m hard wired, for web scale,\\nYeah, I’m gonna bring the 90s back\\n\\nI’m shipping HTML,\\nHTML,\\nI’m shipping HTML,\\nHTML…\\n\\nNo logins, no trackers,\\nNo cookie banners to ignore\\nI ain’t afraid of, no hackers\\nJust the occasional 404\\nThey hatin’, what I do,\\nBut that’s ‘cos they don’t understand\\nMark it up, break it down,\\nRemember to escape your ampersands…\\n\\nI’m shipping HTML,\\nHTML,\\nI’m shipping HTML,\\nHTML…\\n\\n(But it’s really just markdown.)",
  "duration": 220,
  "artist": "Dylan Beattie and the Linebreakers",
  "music": "\"Highway to Hell\" by AC/DC",
  "year": 2020
}

Now that we can read and write, let's implement the REST of the HTTP verbs. REST... get it?

Add an Update Route

Here's the code to update using a POST route. You'll note this code is nearly identical to the GET route. Feel free to refactor it into a helper function, but since this is just a tutorial, I'll skip that for now:

router.post('/:id', async (req, res) => {
  // fetch the Song we are replacing
  let song = await repository.fetch(req.params.id);

  // set all the properties, converting missing properties to null
  song.title = req.body.title ?? null;
  song.artist = req.body.artist ?? null;
  song.genres = req.body.genres ?? null;
  song.lyrics = req.body.lyrics ?? null;
  song.music = req.body.music ?? null;
  song.year = req.body.year ?? null;
  song.duration = req.body.duration ?? null;
  song.link = req.body.link ?? null;

  // save the Song to Redis
  let id = await repository.save(song);

  // return the id of the Song we just saved
  res.send({ id });
});

And the curl command to try it out, replacing Dylan's HTML with D.M.C.A.—sung to the tune of Y.M.C.A. by the Village People:

$ curl -X POST -H "Content-Type: application/json" -d "@songs/d-m-c-a.json" http://localhost:8080/song/01FKRW9WMVXTGF71NBEM3EBRPY -s | json_pp

You should get back the ID of that updated song:

{
  "id": "01FKRW9WMVXTGF71NBEM3EBRPY"
}

Add a Delete Route

And, finally, let's implement a DELETE route:

router.delete('/:id', async (req, res) => {
  // delete the Song with its id
  await repository.remove(req.params.id);

  // respond with OK
  res.type('application/json');
  res.send('OK');
});

And test it out:

$ curl -X DELETE http://localhost:8080/song/01FKRW9WMVXTGF71NBEM3EBRPY -s
OK

This just returns "OK", which is technically JSON but aside from the response header, is indistinguishable from plain text.

Searching with Redis OM

All the CRUD is done. Let's add some search. Search is where Redis OM really starts to shine. We're going to create routes to:

  • Return all the songs, like, all of them.
  • Fetch songs for a particular artist, like "Dylan Beattie and the Linebreakers".
  • Fetch songs that are in a certain genre, like "rock" or "electronic".
  • Fetch songs between years, like all the songs from the 80s.
  • Fetch songs that have certain words in their lyrics, like "html" or "markdown".

Load Songs into Redis

Before we get started, let's load up Redis with a bunch of songs—so we have stuff to search for. I've written a short shell script that loads all the song data on GitHub into Redis using the server we just made. It just calls curl in a loop. It's on GitHub, so go grab it and put it in your project root. Then run it:

$ ./load-data.sh

You should get something like:

{"id":"01FM310A8AVVM643X13WGFQ2AR"} <- songs/big-rewrite.json
{"id":"01FM310A8Q07D6S7R3TNJB146W"} <- songs/bug-in-the-javascript.json
{"id":"01FM310A918W0JCQZ8E57JQJ07"} <- songs/d-m-c-a.json
{"id":"01FM310A9CMJGQHMHY01AP0SG4"} <- songs/enterprise-waterfall.json
{"id":"01FM310A9PA6DK4P4YR275M58X"} <- songs/flatscreens.json
{"id":"01FM310AA2XTEQV2NZE3V7K3M7"} <- songs/html.json
{"id":"01FM310AADVHEZXF7769W6PQZW"} <- songs/lost-it-on-the-blockchain.json
{"id":"01FM310AASNA81Y9ACFMCGP05P"} <- songs/meetup-2020.json
{"id":"01FM310AB4M2FKTDPGEEMM3VTV"} <- songs/re-bass.json
{"id":"01FM310ABFGFYYJXVABX2YXGM3"} <- songs/teams.json
{"id":"01FM310ABW0ANYSKN9Q1XEP8BJ"} <- songs/tech-sales.json
{"id":"01FM310AC6H4NRCGDVFMKNGKK3"} <- songs/these-are-my-own-devices.json
{"id":"01FM310ACH44414RMRHPCVR1G8"} <- songs/were-gonna-build-a-framework.json
{"id":"01FM310ACV8C72Y69VDQHA12C1"} <- songs/you-give-rest-a-bad-name.json

Note that this script will not erase any data. So any songs that you have in there already will still be there, alongside these. And if you run this script more than once, it will gleefully add the songs a second time.

Adding a Songs Router

Like with the CRUD operations for songs, we need to first create a router. This time we'll name the file songs-router.js. Note the plural. Add all the imports and exports to that file like before:

import { Router } from 'express';
import { songRepository as repository } from './song-repository.js';

export let router = Router();

Add this router to Express in server.js under /songs, also like we did before. And, again, note the plural. Your server.js should now look like this:

import express from 'express';
import { router as songRouter } from './song-router.js';
import { router as songsRouter } from './songs-router.js';

// create an express app and use JSON
let app = new express();
app.use(express.json());

// bring in some routers
app.use('/song', songRouter);
app.use('/songs', songsRouter);

// setup the root level GET to return name and version from package.json
app.get('/', (req, res) => {
  res.send({
    name: process.env.npm_package_name,
    version: process.env.npm_package_version,
  });
});

// start listening
app.listen(8080);

Add Some Search Routes

Now we can add some search routes. We initiate a search by calling .search on our repository. Then we call .where to add any filters we want—if we want any at all. Once we've specified the filters, we call .returnAll to get all the matching entities.

Here's the simplest search—it just returns everything. Go ahead and add it to songs-router.js:

router.get('/', async (req, res) => {
  let songs = await repository.search().returnAll();
  res.send(songs);
});

Then try it out with curl or your browser:

$ curl -X GET http://localhost:8080/songs -s | json_pp

We can search for a specific field by calling .where and .eq. This route finds all songs by a particular artist. Note that you must specify the complete name of the artist for this to work:

router.get('/by-artist/:artist', async (req, res) => {
  let artist = req.params.artist;
  let songs = await repository.search().where('artist').eq(artist).returnAll();
  res.send(songs);
});

Then try it out with curl or your browser too:

$ curl -X GET http://localhost:8080/songs/by-artist/Dylan%20Beattie -s | json_pp

Genres are stored as an array of strings. You can use .contains to see if the array contains that genre or not:

router.get('/by-genre/:genre', async (req, res) => {
  let genre = req.params.genre;
  let songs = await repository
    .search()
    .where('genres')
    .contains(genre)
    .returnAll();
  res.send(songs);
});

And try it out:

$ curl -X GET http://localhost:8080/songs/by-genre/rock -s | json_pp
$ curl -X GET http://localhost:8080/songs/by-genre/parody -s | json_pp

This route lets you get all the songs between two years. Great for finding all those 80s hits. Of course, all of Dylan's songs are more recent than that, so we'll go a little narrower when we try it out:

router.get('/between-years/:start-:stop', async (req, res) => {
  let start = Number.parseInt(req.params.start);
  let stop = Number.parseInt(req.params.stop);
  let songs = await repository
    .search()
    .where('year')
    .between(start, stop)
    .returnAll();
  res.send(songs);
});

And, try it out, of course:

$ curl -X GET http://localhost:8080/songs/between-years/2020-2021 -s | json_pp

Let's add the final route to find songs that have certain words in the lyrics using .match:

router.get('/with-lyrics/:lyrics', async (req, res) => {
  let lyrics = req.params.lyrics;
  let songs = await repository
    .search()
    .where('lyrics')
    .match(lyrics)
    .returnAll();
  res.send(songs);
});

We can try this out too, getting all the songs that contain both the words "html" and "markdown":

$ curl -X GET http://localhost:8080/songs/with-lyrics/html%20markdown -s | json_pp

Wrapping Up

And that's a wrap. I've walked you through some of the basics with this tutorial. But you should totally go deeper. If you want to learn more, go ahead and check out Redis OM for Node.js on GitHub. It explains the capabilities of Redis OM for Node.js in greater detail.

If you have any questions or are stuck, feel free to jump on the Redis Discord server and ask there. I'm always hanging out and happy to help.

And, if you find a flaw, bug, or just think this tutorial could be improved, send a pull request or open an issue.

Thanks!


PHPRedis - Redis client library for PHP


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Find tutorials, examples and technical articles that will help you to develop with Redis and PHP.

Getting Started

In order to use Redis with PHP you will need a PHP Redis client. In the following sections, we will demonstrate the use of PhpRedis, a flexible and feature-complete Redis client library for PHP. Additional PHP clients for Redis can be found under the PHP section of the Redis Clients page.

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis cache delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT.

Step 1. Run a Redis server

You can either run a Redis server in a Docker container or directly on your machine. Run the following commands to set up a Redis server locally on macOS:

 brew tap redis-stack/redis-stack
brew install --cask redis-stack
INFO

Redis Stack unifies and simplifies the developer experience of the leading Redis modules and the capabilities they provide.

Ensure that you are able to use the following Redis command to connect to the Redis instance.

 redis-cli -h localhost -p 6379
localhost>

Now you should be able to perform CRUD operations with Redis keys. The above redis-cli command might require a password if you have set up authentication in your Redis configuration file; if no password is set, it will connect without one. You can try inserting data with the SET command and then fetching it back with the GET command.
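
For example (an illustrative session, using the prompt from the connection above):

localhost> set mykey "Hello Redis"
OK
localhost> get mykey
"Hello Redis"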

Step 2. Get pecl

apt install pkg-php-tools

Step 3. Install PhpRedis

pecl install redis

Step 4. Opening a Connection to Redis Using PhpRedis

The following code creates a connection to Redis using PhpRedis:

<?php

$redis = new Redis();

// Connecting to Redis
$redis->connect('hostname', port);
$redis->auth('password');

if ($redis->ping()) {
    echo "PONG";
}

?>

Replace the hostname, port, and password values with those of your database, then save this file as connect.php.

Step 5. Executing the script

php connect.php

It should display "PONG" as output. You can verify this by running the monitor command in another redis-cli session:

127.0.0.1:6379> monitor
OK
1614778301.165001 [0 [::1]:57666] "PING"

Further Reference:

Redis Launchpad
Using Redis with FastAPI

If you aren't familiar with asyncio, take a few minutes to watch this primer on asyncio before continuing:

Installing the Redis Client

We're going to start this tutorial assuming that you have a FastAPI project to work with. We'll use the IsBitcoinLit project for our examples.

Poetry is the best way to manage Python dependencies today, so we'll use it in this tutorial.

IsBitcoinLit includes a pyproject.toml file that Poetry uses to manage the project's dependencies, but if you had not already created one, you could do so like this:

    $ poetry init

Once you have a pyproject.toml file, and assuming you already added FastAPI and any other necessary dependencies, you could add aioredis-py to your project like this:

    $ poetry add aioredis@2.0.0
note

This tutorial uses aioredis-py 2.0. The 2.0 version of aioredis-py features an API that matches the most popular synchronous Redis client for Python, redis-py.

The aioredis-py client is now installed. Time to write some code!

Integrate aioredis-py with FastAPI

We're going to use Redis for a few things in this FastAPI app:

  1. Storing 30-second averages of sentiment and price for the last 24 hours with Redis Time Series
  2. Rolling up these averages into a three-hour snapshot with Redis Time Series
  3. Caching the three-hour snapshot

Let's look at each of these integration points in more detail.

Creating the time series

The data for our app consists of 30-second averages of Bitcoin prices and sentiment ratings for the last 24 hours. We pull these from the SentiCrypt API.

note

We have no affiliation with SentiCrypt or any idea how accurate these numbers are. This example is just for fun!

We're going to store price and sentiment averages in a time series with Redis Stack, so we want to make sure that when the app starts up, the time series exists.

We can use a startup event to accomplish this. Doing so looks like the following:

@app.on_event('startup')
async def startup_event():
    keys = Keys()
    await initialize_redis(keys)

We'll use the TS.CREATE command to create the time series within our initialize_redis() function:

async def make_timeseries(key):
    """
    Create a timeseries with the Redis key `key`.

    We'll use the duplicate policy known as "first," which ignores
    duplicate pairs of timestamp and values if we add them.

    Because of this, we don't worry about handling this logic
    ourselves -- but note that there is a performance cost to writes
    using this policy.
    """
    try:
        await redis.execute_command(
            'TS.CREATE', key,
            'DUPLICATE_POLICY', 'first',
        )
    except ResponseError as e:
        # Time series probably already exists
        log.info('Could not create time series %s, error: %s', key, e)

tip

When you create a time series, use the DUPLICATE_POLICY option to specify how to handle duplicate pairs of timestamp and values.

Storing Sentiment and Price Data in Redis

A /refresh endpoint exists in the app to allow a client to trigger a refresh of the 30-second averages. This is the entire function:

@app.post('/refresh')
async def refresh(background_tasks: BackgroundTasks, keys: Keys = Depends(make_keys)):
    async with httpx.AsyncClient() as client:
        data = await client.get(SENTIMENT_API_URL)

    await persist(keys, data.json())
    data = await calculate_three_hours_of_data(keys)
    background_tasks.add_task(set_cache, data, keys)

As is often the case with Python, a lot happens in a few lines, so let's walk through them.

The first thing we do is get the latest sentiment and price data from SentiCrypt. The response data looks like this:

[
  {
    "count": 7259,
    "timestamp": 1625592626.3452034,
    "rate": 0.0,
    "last": 0.33,
    "sum": 1425.82,
    "mean": 0.2,
    "median": 0.23,
    "btc_price": "33885.23"
  }
  //... Many more entries
]

Then we save the data into two time series in Redis with the persist() function. That ends up calling another helper, add_many_to_timeseries(), like this:

    await add_many_to_timeseries(
        (
            (ts_price_key, 'btc_price'),
            (ts_sentiment_key, 'mean'),
        ), data,
    )

The add_many_to_timeseries() function takes a list of (time series key, sample key) pairs and a list of samples from SentiCrypt. For each sample, it reads the value of the sample key in the SentiCrypt sample, like "btc_price," and adds that value to the given time series key.

Here's the function:

async def add_many_to_timeseries(
    key_pairs: Iterable[Tuple[str, str]],
    data: BitcoinSentiments
):
    """
    Add many samples to a single timeseries key.

    `key_pairs` is an iterable of tuples containing in the 0th position the
    timestamp key into which to insert entries and the 1st position the name
    of the key within the `data` dict to find the sample.
    """
    partial = functools.partial(redis.execute_command, 'TS.MADD')
    for datapoint in data:
        for timeseries_key, sample_key in key_pairs:
            partial = functools.partial(
                partial, timeseries_key, int(
                    float(datapoint['timestamp']) * 1000,
                ),
                datapoint[sample_key],
            )
    return await partial()

This code is dense, so let's break it down.

We're using the TS.MADD command to add many samples to a time series. We use TS.MADD because doing so is faster than TS.ADD for adding batches of samples to a time series.

This results in a single large TS.MADD call that adds price data to the price time series and sentiment data to the sentiment timeseries. Conveniently, TS.MADD can add samples to multiple time series in a single call.
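
The assembled command ends up looking something like this, with repeating groups of key, timestamp, and value (the key names and values here are illustrative, not the app's real key names):

TS.MADD ts:price 1625592626345 33885.23 ts:sentiment 1625592626345 0.2 ...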

Calculating Three-Hour Averages with Redis

Clients use IsBitcoinLit to get the average price and sentiment for each of the last three hours. But so far, we've only stored 30-second averages in Redis. How do we calculate the average of these averages for the last three hours?

When we run /refresh, we call calculate_three_hours_of_data() to do so. The function looks like this:

async def calculate_three_hours_of_data(keys: Keys) -> Dict[str, str]:
    sentiment_key = keys.timeseries_sentiment_key()
    price_key = keys.timeseries_price_key()
    three_hours_ago_ms = int((now() - timedelta(hours=3)).timestamp() * 1000)

    sentiment = await get_hourly_average(sentiment_key, three_hours_ago_ms)
    price = await get_hourly_average(price_key, three_hours_ago_ms)

    last_three_hours = [{
        'price': data[0][1], 'sentiment': data[1][1],
        'time': datetime.fromtimestamp(data[0][0] / 1000, tz=timezone.utc),
    }
        for data in zip(price, sentiment)]

    return {
        'hourly_average_of_averages': last_three_hours,
        'sentiment_direction': get_direction(last_three_hours, 'sentiment'),
        'price_direction': get_direction(last_three_hours, 'price'),
    }

There is more going on here than we need to know for this tutorial. As a summary, most of this code exists to support calls to get_hourly_average().

That function is where the core logic exists to calculate averages for the last three hours, so let's see what it contains:

async def get_hourly_average(ts_key: str, top_of_the_hour: int):
    response = await redis.execute_command(
        'TS.RANGE', ts_key, top_of_the_hour, '+',
        'AGGREGATION', 'avg', HOURLY_BUCKET,
    )
    # The response is a list of the structure [timestamp, average].
    return response

Here, we use the TS.RANGE command to get the samples in the time series from the "top" of the hour three hours ago, until the latest sample in the series. With the AGGREGATION parameter, we get back the averages of the samples in hourly buckets.

So where does this leave us? With averages of the averages, one for each of the last three hours.

Caching Data with Redis

Let's review. We have code that achieves the following:

  1. Gets the latest sentiment and price data from SentiCrypt.
  2. Saves the data into two time series in Redis.
  3. Calculates the average of the averages for the last three hours.

The snapshot of averages for the last three hours is the data we want to serve clients when they hit the /is-bitcoin-lit endpoint. We could run this calculation every time a client requests data, but that would be inefficient. Let's cache it in Redis!

First, we'll look at writing to the cache. Then we'll see how FastAPI reads from the cache.

Writing Cache Data to Redis

Take a closer look at the last line of the refresh() function:

    background_tasks.add_task(set_cache, data, keys)

In FastAPI, you can run code outside of a web request after returning a response. This feature is called background tasks.

This is not as robust as using a background task library like Celery. Instead, Background Tasks are a simple way to run code outside of a web request, which is a great fit for things like updating a cache.

When you call add_task(), you pass in a function and a list of arguments. Here, we pass in set_cache(). This function saves the three-hour averages summary to Redis. Let's look at how it works:

async def set_cache(data, keys: Keys):
    def serialize_dates(v):
        return v.isoformat() if isinstance(v, datetime) else v

    await redis.set(
        keys.cache_key(),
        json.dumps(data, default=serialize_dates),
        ex=TWO_MINUTES,
    )

First, we serialize the three-hour summary data to JSON and save it to Redis. We use the ex parameter to set the expiration time for the data to two minutes.

TIP: You need to provide a default serializer for the json.dumps() function so that dumps() knows how to serialize datetime objects.

This means that after every refresh, we've primed the cache. The cache isn't primed for long -- only two minutes -- but it's something!

Reading Cache Data from Redis

We haven't even seen the API endpoint that clients will use yet! Here it is:

@app.get('/is-bitcoin-lit')
async def bitcoin(background_tasks: BackgroundTasks, keys: Keys = Depends(make_keys)):
    data = await get_cache(keys)

    if not data:
        data = await calculate_three_hours_of_data(keys)
        background_tasks.add_task(set_cache, data, keys)

    return data

To use this endpoint, clients make a GET request to /is-bitcoin-lit. Then we try to get the cached three-hour summary from Redis. If we can't, we calculate the three-hour summary, return it, and then save it outside of the web request.

We've already seen how calculating the summary data works, and we just explored saving the summary data to Redis. So, let's look at the get_cache() function, where we read the cached data:

def datetime_parser(dct):
    for k, v in dct.items():
        if isinstance(v, str) and v.endswith('+00:00'):
            try:
                dct[k] = datetime.datetime.fromisoformat(v)
            except:
                pass
    return dct


async def get_cache(keys: Keys):
    current_hour_cache_key = keys.cache_key()
    current_hour_stats = await redis.get(current_hour_cache_key)

    if current_hour_stats:
        return json.loads(current_hour_stats, object_hook=datetime_parser)

Remember that when we serialized the summary data to JSON, we needed to provide a default serializer for json.dumps() that understood datetime objects. Now that we're deserializing that data, we need to give json.loads() an "object hook" that understands datetime strings. That's what datetime_parser() does.

Other than parsing dates, this code is relatively straightforward. We get the current hour's cache key, and then we try to get the cached data from Redis. If we can't, we return None.

Summary

Putting all the pieces together, we now have a FastAPI app that can retrieve Bitcoin price and sentiment averages, store the averages in Redis, cache three-hour summary data in Redis, and serve the data to clients. Not too shabby!

Here are a few notes to consider:

  1. We manually controlled caching in this tutorial, but you can also use a library like aiocache to cache data in Redis.
  2. We ran Redis commands like TS.MADD using the execute_command() method in aioredis-py. If you are instead using redis-py in a synchronous project, you can use the same commands.
Getting Started With Redis OM for Python

…defined earlier. We'll add Field(index=True) to tell Redis OM that we want to index the last_name and age fields:

import datetime
from typing import Optional

from pydantic import EmailStr

from redis_om import (
    Field,
    get_redis_connection,
    HashModel,
    Migrator
)


class Customer(HashModel):
    first_name: str
    last_name: str = Field(index=True)
    email: EmailStr
    join_date: datetime.date
    age: int = Field(index=True)
    bio: Optional[str]


# Now, if we use this model with a Redis deployment that has the
# Redis Stack installed, we can run queries like the following.

# Before running queries, we need to run migrations to set up the
# indexes that Redis OM will use. You can also use the `migrate`
# CLI tool for this!
Migrator().run()


# Find all customers with the last name "Brookins"
Customer.find(Customer.last_name == "Brookins").all()

# Find all customers that do NOT have the last name "Brookins"
Customer.find(Customer.last_name != "Brookins").all()

# Find all customers whose last name is "Brookins" OR whose age is
# 100 AND whose last name is "Smith"
Customer.find((Customer.last_name == "Brookins") | (
    Customer.age == 100
) & (Customer.last_name == "Smith")).all()

Next Steps

Now that you know the basics of working with Redis OM, start playing around with it in your project!

If you're a FastAPI user, check out how to integrate Redis OM with FastAPI.

Ruby and Redis

Additional Ruby clients for Redis can be found under the Ruby section of the Redis Clients page.

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis cache delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT.

Step 1. Run a Redis server

You can either run Redis in a Docker container or directly on your machine. Run the following commands to set up a Redis server on macOS:

 brew tap redis-stack/redis-stack
brew install --cask redis-stack
INFO

Redis Stack unifies and simplifies the developer experience of the leading Redis modules and the capabilities they provide.

Ensure that you are able to use the following Redis command to connect to the Redis instance.

 redis-cli
127.0.0.1:6379>

Now you should be able to perform CRUD operations with Redis commands. For example, you can insert data into Redis with the SET command and then fetch it with the GET command. The above redis-cli command might require a password if you have set up authentication in your Redis configuration file. By default, Redis listens on port 6379. This can be modified in the Redis configuration file.

Step 2. Clone the repository

git clone https://github.com/redis-developer/redis-ruby-getting-started

Step 3. Install redis-rb:

$ gem install redis

OR

Step 4. Use Gemfile

$ cat Gemfile
gem 'redis'

Step 5. Execute

$ bundle install
Resolving dependencies...
Using bundler 2.2.6
Using redis 4.2.5
Following files may not be writable, so sudo is needed:
/Library/Ruby/Gems/2.6.0
/Library/Ruby/Gems/2.6.0/build_info
/Library/Ruby/Gems/2.6.0/cache
/Library/Ruby/Gems/2.6.0/doc
/Library/Ruby/Gems/2.6.0/extensions
/Library/Ruby/Gems/2.6.0/gems
/Library/Ruby/Gems/2.6.0/specifications
Bundle complete! 1 Gemfile dependency, 2 gems now installed.
Use `bundle info [gemname]` to see where a bundled gem is installed.

Step 6. Verifying

bundle info redis
* redis (4.2.5)
Summary: A Ruby client library for Redis
Homepage: https://github.com/redis/redis-rb
Documentation: https://www.rubydoc.info/gems/redis/4.2.5
Source Code: https://github.com/redis/redis-rb/tree/v4.2.5
Changelog: https://github.com/redis/redis-rb/blob/master/CHANGELOG.md
Bug Tracker: https://github.com/redis/redis-rb/issues
Path: /Library/Ruby/Gems/2.6.0/gems/redis-4.2.5

Step 7. Opening a Connection to Redis Using redis-rb

The following code creates a connection to Redis using redis-rb:

require 'redis'
redis = Redis.new(host: "localhost", port: 6379, db: 11)
redis.set("mykey", "hello world")
redis.get("mykey")

To adapt this example to your code, make sure that you replace the host, port, and database number with the values for your own database.

You can find connect.rb in this repository, which you can use directly to test the connection.

Just execute the command below:

ruby connect.rb

Step 8. Verifying

127.0.0.1:6379> monitor
OK
1614684665.728109 [0 [::1]:50918] "select" "11"
1614684665.728294 [11 [::1]:50918] "set" "mykey" "hello world"
1614684665.728435 [11 [::1]:50918] "get" "mykey"

Redis Launchpad

Redis Launchpad is like an “App Store” for Redis sample apps. You can easily find apps for your preferred frameworks and languages. Check out a few of these apps below, or click here to access the complete list.

Rate-Limiting app in Ruby on Rails

Launchpad

Rate Limiting app built in Ruby on Rails

Leaderboard app in Ruby on Rails

Launchpad

How to implement a leaderboard app in Ruby on Rails

Further References

- + \ No newline at end of file diff --git a/develop/rust/index.html b/develop/rust/index.html index c7809aa2a4..22ebab8bc9 100644 --- a/develop/rust/index.html +++ b/develop/rust/index.html @@ -4,7 +4,7 @@ Rust and Redis | The Home of Redis Developers - + @@ -15,7 +15,7 @@ The web page “Redis Enterprise and Rust” will help you to get started with Redis Enterprise and Rust in a much easier manner. redis-rs is a rust implementation of a Redis client library. It exposes a general purpose interface to Redis and also provides specific helpers for commonly used functionality.

Step 1. Install Rust

 curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

Step 2. Configure your current shell:

 source $HOME/.cargo/env

Step 3. Verify Rust compiler:

 rustc --version
rustc 1.49.0

Step 4. Create Cargo.toml with the Redis dependency:

 [dependencies]
redis = "0.8.0"

Step 5. Clone the repository

 git clone https://github.com/redis-developer/redis-rust-getting-started

Step 6. Run the application

 cargo run

Further References

- + \ No newline at end of file diff --git a/ebooks/8-nosql-data-modeling-patterns/index.html b/ebooks/8-nosql-data-modeling-patterns/index.html index 24a8e1f927..333a58e855 100644 --- a/ebooks/8-nosql-data-modeling-patterns/index.html +++ b/ebooks/8-nosql-data-modeling-patterns/index.html @@ -4,7 +4,7 @@ Learn 8 NoSQL Data Modeling Patterns in Redis | The Home of Redis Developers - + @@ -52,7 +52,7 @@ that shows all the details of each item. There is a 1-to-1 relationship between each item in the list view and the detailed view (shown in Picture 2) of the item. The detailed view shows all the details such as multiple photos, description, manufacturer, dimensions, weight, and so on.

Picture 1Picture 1 1-to-1 List View
Picture 2Picture 2 1-to-1 Detailed View

1-to-1 Relationships using SQL

In a relational database, you may create a table called products where each row holds just enough data to display the information in the list view. Then, you may create another table called product_details where each row holds the rest of the details. You would also need a product_images table, where you store all of the images for a product. You can see the entity relationship diagram in Picture 3.

Picture 3Picture 3 1-to-1 Entity Diagram

Picture 3 depicts the entity relationships between products, product_details, and product_images and represents a normalized data model with a single denormalized field image in the products table. The reason for this is to avoid having to use a SQL JOIN when selecting the products for the list view. Using this model, the SQL query used to get the data needed for the list view might resemble Code Example 1.

Code Example 1
SELECT
p.id, p.name, p.image, p.price
FROM
products p
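
For the detailed view, by contrast, the normalized tables come into play. A hypothetical query for a single product (the join columns are assumed from the entity description above, not taken from the e-book) would need JOINs:

SELECT
    p.id, p.name, p.price, pd.description, pd.manufacturer, pi.url
FROM
    products p
    JOIN product_details pd ON pd.product_id = p.id
    JOIN product_images pi ON pi.product_id = p.id
WHERE
    p.id = :product_id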

1-to-1 Relationships using Redis

In Redis, similar to a relational database, you can create a collection called products and another called product_details. But with Redis JSON you can improve this by simply embedding product_images and product_details directly into the Products collection. Then, when you query the Products collection, specify which fields you need based on which view you are trying to create.

This will allow you to easily keep all the data in one place. This is called the Embedded Pattern and is one of the most common patterns you will see in NoSQL document databases like Redis JSON. Code Example 2 uses Python and a client library called Redis OM (an ORM for Redis) to model Products and ProductDetails. Note that ProductDetails is embedded into Products directly, so all of the data for a product will be stored within the same document.

Code Example 2
from typing import List, Optional

from redis_om import EmbeddedJsonModel, Field, JsonModel


class ProductDetail(EmbeddedJsonModel):
    description: str
    manufacturer: str
    dimensions: str
    weight: str
    images: List[str]


class Product(JsonModel):
    name: str = Field(index=True)
    image: str = Field(index=True)
    price: int = Field(index=True)
    details: Optional[ProductDetail]

Code Example 2 also shows how you can index fields using Redis OM and Redis Search. Doing this turns Redis into not only a document store but also a search engine since Redis Search enables secondary indexing and searching. When you create models using Redis OM, it will automatically manage secondary indexes with Redis Search on your behalf.

Using Redis OM we can write a function to retrieve our products list for the list view, as shown in Code Example 3.

Code Example 3
async def get_product_list():
    results = await connections \
        .get_redis_connection() \
        .execute_command(
            f'FT.SEARCH {Product.Meta.index_name} * LIMIT 0 10 RETURN 3 name image price'
        )
    return Product.from_redis(results)

Notice that in Code Example 3 we are using the FT.SEARCH command, which specifies the index managed on our behalf by Redis OM and returns three fields: name, image, and price. While the documents all have details and images embedded, we don’t want to display them in the list view so we don’t need to query them. When we want the detailed view, we can query an entire Product document. See Code Example 4 for how to query an entire document.

Code Example 4
async def get_product_details(product_id: str):
    return await Product.get(product_id)

When using Redis, you can use RedisInsight as a GUI tool to visualize and interact with the data in your database. Picture 4 shows you what a Products document looks like.

Picture 4Picture 4 1-to-1 RedisInsight

Download the E-book

I’m sure you’re eager to learn more, so click here to download the full e-book.

- + \ No newline at end of file diff --git a/ebooks/three-caching-design-patterns/index.html b/ebooks/three-caching-design-patterns/index.html index 4c1d7ac8dc..25291f3f91 100644 --- a/ebooks/three-caching-design-patterns/index.html +++ b/ebooks/three-caching-design-patterns/index.html @@ -4,7 +4,7 @@ 3 design patterns to speed up MEAN and MERN stack applications | The Home of Redis Developers - + @@ -14,7 +14,7 @@

3 design patterns to speed up MEAN and MERN stack applications

Below you will find an excerpt from the e-book. Click here to download the full e-book.

3 design patterns to speed up MEAN and MERN stack applications

Introduction

If you don't design and build software with attention to performance, your applications can encounter significant bottlenecks when they go into production.

Over time, the development community has learned common techniques that work as reliable design patterns to solve well-understood problems, including application performance.

So what are design patterns? They are recommended practices to solve recurring design problems in software systems. A design pattern has four parts: a name, a problem description (a particular set of conditions to which the pattern applies), a solution (the best general strategy for resolving the problem), and a set of consequences.

Two development stacks that have become popular ways to build Node.js applications are the MEAN stack and the MERN stack. The MEAN stack is made up of the MongoDB database, the Express and Angular.js frameworks, and Node.js. It is a pure JavaScript stack that helps developers create every part of a website or application. In contrast, the MERN stack is made up of MongoDB, the Express and ReactJS frameworks, and Node.js.

Both stacks work well, which accounts for their popularity. But it doesn't mean the software generated runs as fast as it can—or as fast as it needs to.

In this post, we share one popular design pattern that developers use with Redis to improve the performance of MEAN and MERN stack applications: the master data-lookup pattern. We explain the pattern in detail, along with an overview, typical use cases, and a code example. Our intent is to help you understand when and how to use this particular pattern in your own software development. The e-book covers other patterns too, such as the cache-aside pattern and the write-behind pattern.

Building a movie application

The demo application used in the rest of this tutorial showcases a movie application with basic create, read, update, and delete (CRUD) operations.

demo-01

The movie application dashboard contains a search section at the top and a list of movie cards in the middle. The floating plus icon displays a pop-up when the user selects it, permitting the user to enter new movie details. The search section has a text search bar and a toggle link between text search and basic (that is, form-based) search. Each movie card has edit and delete icons, which are displayed when a mouse hovers over the card.

This tutorial uses a GitHub sample demo that was built using the following tools:

  • Frontend: ReactJS (18.2.0)
  • Backend: Node.js (16.17.0)
  • Database: MongoDB
  • Cache and database: Redis stack (using Docker)
GITHUB CODE

Below are the commands to clone the source code (frontend and backend) for the application used in this tutorial:

git clone https://github.com/redis-developer/ebook-speed-mern-frontend.git

git clone https://github.com/redis-developer/ebook-speed-mern-backend.git

The master data-lookup pattern

One ongoing developer challenge is to (swiftly) create, read, update, and (possibly) delete data that lives long, changes infrequently, and is regularly referenced by other data, directly or indirectly. That's a working definition of master data, especially when it also represents the organization's core data that is considered essential for its operations.

Master data generally changes infrequently. Country lists, genres, and movie languages usually stay the same. That presents an opportunity to speed things up. You can address access and manipulation operations so that data consistency is preserved and data access happens quickly.

From a developer's point of view, master data lookup refers to the process by which master data is accessed in business transactions, in application setup, and any other way that software retrieves the information. Examples of master data lookup include fetching data for user interface (UI) elements (such as drop-down dialogs, select values, multi-language labels), fetching constants, user access control, theme, and other product configuration. And you can do that even when you rely primarily on MongoDB as a persistent data store.

pattern

To serve master data from Redis, preload the data from MongoDB.

  1. Read the master data from MongoDB on application startup and store a copy of the data in Redis. This pre-caches the data for fast retrieval. Use a script or a cron job to periodically copy master data to Redis (a minimal sketch of this preload follows this list).
  2. The application requests master data.
  3. Instead of MongoDB serving the data, the master data will be served from Redis.
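
As a minimal sketch of step 1, using the MongoDB Node.js driver and node-redis (the collection name comes from the code examples below; the function name, key prefix, and connection handles are placeholders for illustration):

// Hypothetical startup preload: copy master data from MongoDB into Redis.
// mongoDb is a connected MongoDB database handle; redisClient is a connected
// node-redis client with RedisJSON available (as in Redis Stack).
async function preloadMasterData(mongoDb, redisClient) {
  const docs = await mongoDb
    .collection('masterCategories')
    .find({ statusCode: { $gt: 0 } })
    .toArray();
  for (const doc of docs) {
    // Store each master record as JSON so later lookups are served from Redis
    await redisClient.json.set(`masterCategory:${doc._id}`, '$', doc);
  }
}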

Use cases

Consider this pattern when you need to

  • Serve master data at speed: By definition, nearly every application requires access to master data. Pre-caching master data with Redis delivers it to users at high speed.
  • Support massive master tables: Master tables often have millions of records. Searching through them can cause performance bottlenecks. Use Redis to perform real-time search on the master data to increase performance with sub-millisecond response.
  • Postpone expensive hardware and software investments: Defer costly infrastructure enhancements by using Redis. Get the performance and scaling benefits without asking the CFO to write a check.

Demo

The image below illustrates a standard way to showcase a UI that is suitable for master data lookups. The developer responsible for this application would treat certain fields as master data, including movie language, country, genre, and ratings, because they are required for common application transactions.

Consider the pop-up dialog that appears when a user who wants to add a new movie clicks the movie application's plus icon. The pop-up includes drop-down menus for both country and language. In this demonstration, Redis loads the values.

demo-03

Code

The two code blocks below display a fetch query of master data from both MongoDB and Redis that loads the country and language drop-down values.

Previously, if the application used MongoDB, it searched the static database to retrieve the movie's country and language values. That can be time-consuming if it's read from persistent storage—and is inefficient if the information is static.

*** BEFORE (MongoDB) ***
*** MongoDB regular search query ***
function getMasterCategories() {
  ...
  db.collection("masterCategories").find({
    statusCode: {
      $gt: 0,
    },
    category: {
      $in: ["COUNTRY", "LANGUAGE"],
    },
  });
  ...
}

Instead, the “after” views in the code blocks show that the master data can be accessed with only a few lines of code—and much faster response times.

*** AFTER (Redis) ***
*** Redis OM Node query ***
function getMasterCategories() {
  ...
  masterCategoriesRepository
    .search()
    .where("statusCode")
    .gt(0)
    .and("categoryTag")
    .containOneOf("COUNTRY", "LANGUAGE");
  ...
}

Download the E-book

Sensing a pattern here? The master data-lookup pattern is not the only design pattern you can use to improve application performance.

I’m sure you’re eager to learn more, so click here to download the full e-book.

- + \ No newline at end of file diff --git a/explore/datadog/index.html b/explore/datadog/index.html index 6f6291b789..5bb9507481 100644 --- a/explore/datadog/index.html +++ b/explore/datadog/index.html @@ -4,7 +4,7 @@ Redis Enterprise Observability with Datadog | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Redis Enterprise Observability with Datadog


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis
Profile picture for Christian Mague
Author:
Christian Mague, Former Principal Field Engineer at Redis

Datadog

DevOps and SRE practitioners are already keenly aware of the importance of system reliability, as it’s one of the shared goals in every high-performing organization. Defining clear reliability targets based on solid data is crucial for productive collaboration between developers and SREs. This need spans the entire infrastructure from application to backend database services.

Service Level Objectives (SLOs) provide a powerful interface for all teams to set clear performance and reliability goals based on Service Level Indicators (SLIs) or data points. A good model is to think of the SLIs as the data and the SLO as the information one uses to make critical decisions.

Further Read: https://cloud.google.com/blog/products/devops-sre/sre-fundamentals-slis-slas-and-slos

Redis

Redis is a popular multi-model NoSQL database server that provides in-memory data access speeds for search, messaging, streaming, caching, and graph—amongst other capabilities. Highly performant sites such as Twitter, Snapchat, Freshworks, GitHub, Docker, Pinterest, and Stack Overflow all look to Redis to move data in real time.

Redis SLOs can be broken down into three main categories:

Category | Definition | Example SLO | Example SLI
Throughput | Number of operations being pushed through the service in a given time period | System should be capable of performing 200M operations per second | redisenterprise.total_req
Latency | Elapsed time it takes for an operation | Average write latency should not exceed 1 millisecond | redisenterprise.avg_latency
Capacity | Memory/storage/network limits of the underlying data source | Database should have 20% memory overhead available to handle bursts | redisenterprise.used_memory_percent

Why Datadog?

Running your own performance data platform is time-consuming and difficult. Datadog provides an excellent platform with an open source agent to collect metrics, display them easily, and alert on them when necessary.

Datadog allows you to:

  • Collect metrics from various infrastructure components out of the box
  • Display that data in easy-to-read dashboards
  • Monitor performance metrics and alert accordingly
  • Correlate log entries with metrics to quickly drill down to root causes
  • Trace requests across distributed services

Key Performance Indicators

1. Latency

Definition

redisenterprise.avg_latency (unit: microseconds)

This is the average amount of time that a request takes to return from the time that it first hits the Redis Enterprise proxy until the response is returned. It does not include the full time from the remote client’s perspective.

Characteristics

Since Redis is popular due to its performance, you would generally expect most operations to return in single-digit milliseconds. Tune any alerts to match your SLA. It’s generally recommended that you also measure Redis operation latency on the client side, to make it easier to determine whether a server slowdown or an increase in network latency is the culprit in any performance issue.
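
One simple way to sample latency from the client side is redis-cli’s built-in latency mode (the numbers shown are illustrative):

 redis-cli --latency -h HOST -p PORT
min: 0, max: 2, avg: 0.15 (1432 samples)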

Possible Causes
Cause | Factors
Spike in requests | Check both the Network Traffic and Operations Per Second metrics to determine if there is a corresponding increase
Slow-running queries | Check the slow logs in the Redis Enterprise UI for the database
Insufficient compute resources | Check to see if the CPU Usage, Memory Usage Percentage, or Evictions are increasing
Remediation
Action | Method
Increase resources | The database can be scaled up online by going to the Web UI and enabling clustering on the database. In extreme cases, more nodes can be added to the cluster and resources rebalanced
Inefficient queries | Redis allows you to view slow logs with a tunable threshold. It can be viewed either in the Redis Enterprise UI or by running: redis-cli -h HOST -p PORT -a PASSWORD SLOWLOG GET 100

2. Memory Usage Percentage

Definition

redisenterprise.memory_usage_percent (unit: percentage)

This is the percentage of used memory over the memory limit set for the database.

Characteristics

In Redis Enterprise, all databases have a maximum memory limit set to ensure isolation in a multi-tenant environment. This is also highly recommended when running open source Redis. Be aware that Redis does not immediately free memory upon key deletion. Depending on the size of the database, generally between 80-95% is a safe threshold.
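
To see where a database stands relative to its limit, you can compare the used and maximum memory values reported by INFO (the values shown are illustrative):

 redis-cli -h HOST -p PORT -a PASSWORD INFO memory | grep -E 'used_memory_human|maxmemory_human'
used_memory_human:812.19M
maxmemory_human:1.00G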

Possible Causes
Cause | Factors
Possible spike in activity | Check both the Network Traffic and Operations Per Second metrics to determine if there is a corresponding increase
Database sized incorrectly | View the Memory Usage raw bytes over time to see if a usage pattern has changed
Incorrect retention policies | Check to see if keys are being Evicted or Expired
Remediation
Action | Method
Increase resources | The database memory limit can be raised online with no downtime through either the Redis Enterprise UI or the API
Retention Policy | In a caching use case, setting a TTL for unused data to expire is often helpful. In addition, Eviction policies can be set; however, these may often not be able to keep up in extremely high-throughput environments with very tight resource constraints

3. Cache Hit Rate

Definition
redisenterprise.cache_hit_rate (unit: percent)

This is the percentage of time that Redis is accessing a key that already exists.

Characteristics

This metric is useful only in the caching use case and should be ignored for all other use cases. There are tradeoffs between the freshness of the data in the cache and the efficacy of the cache in mitigating traffic to any backend data service. These tradeoffs should be considered carefully when determining the threshold for alerting.

Possible Causes

This is highly specific to the application caching with no general rules that are applicable in the majority of cases.

Remediation

Note that Redis commands return information on whether or not a key or field already exists. For example, the HSET command returns the number of fields in the hash that were added.
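
For example, HSET returns 1 when a field was newly created and 0 when it already existed, which you can use as a rough hit/miss signal when writing through the cache:

 127.0.0.1:6379> HSET user:1 name "Ada"
(integer) 1
127.0.0.1:6379> HSET user:1 name "Ada"
(integer) 0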

4. Evictions

Definition
redisenterprise.evicted_objects (unit: count)

This is the count of items that have been evicted from the database.

Characteristics

Eviction occurs when the database is close to capacity. In this condition, the eviction policy starts to take effect. While expiration is fairly common in the caching use case, eviction from the cache should generally be a matter of concern. In very high throughput use cases with tightly constrained resources, the eviction sweeps sometimes cannot keep up with memory pressure. Relying on eviction as a memory management technique should be considered carefully.

Possible Causes

See Memory Usage Percentage Possible Causes

Remediation

See Memory Usage Percentage Remediation

Secondary Indicators

1. Network Traffic

Definition
redisenterprise.ingress_bytes/redisenterprise.egress_bytes (unit: bytes)

Counters for the network traffic coming into the database and out from the database.

Characteristics

While these two metrics will not help you pinpoint a root cause, network traffic is an excellent leading indicator of trouble. Changes in network traffic patterns indicate corresponding changes in database behavior and further investigation is usually warranted.

2. Connection Count

Definition
redisenterprise.conns (unit: count)

The number of current client connections to the database.

Characteristics

This metric should be monitored with both a minimum and maximum number of connections. The minimum number of connections not being met is an excellent indicator of either networking or application configuration errors. The maximum number of connections being exceeded may indicate a need to tune the database.

Possible Causes
Cause | Factors
Minimum clients not met | Incorrect client configuration, network firewall, or network issues
Maximum connections exceeded | Client library is not releasing connections or an increase in the number of clients
Remediation
Action | Method
Clients misconfigured | Confirm client configurations
Networking issue | Issue the PING command from a client node, or TELNET to the endpoint
Too many connections | Be sure that you are using pooling on your client library and that your pools are sized accordingly
Too many connections | Using rladmin, run: tune proxy PROXY_NUMBER threads VALUE max_threads VALUE

You can access the complete list of metrics here.

Getting Started

Follow the steps below to set up the Datadog agent to monitor your Redis Enterprise cluster, as well as database metrics:

Quickstart Guide:

Prerequisites:

  • Follow this link to set up your Redis Enterprise cluster and database
  • Set up a read-only user account by logging into your Redis Enterprise instance and visiting the “Access Control” section

alt_text

  • Add a new user account with Cluster View Permissions.

alt_text

Step 1. Set Up a Datadog Agent

Before we jump into the installation, let’s look at the various modes that you can run the Datadog agent in:

  • External Monitor Mode
  • Localhost Mode

External Monitor Mode

alt_text

In external monitor mode, a Datadog agent running outside of the cluster can monitor multiple Redis Enterprise clusters, as shown in the diagram above.

Localhost Mode

Using localhost mode, the integration can be installed on every node of a Redis Enterprise cluster. This allows the user to correlate OS level metrics with Redis-specific metrics for faster root cause analysis. Only the Redis Enterprise cluster leader will submit metrics and events to Datadog. In the event of a migration of the cluster leader, the new cluster leader will begin to submit data to Datadog.

alt_text

For this demo, we will be leveraging localhost mode as we just have two nodes to configure.

Step 2. Launch the Datadog agent on the Master node

Pick your preferred OS distribution and install the Datadog agent.

alt_text

Run the following command to install the integration wheel with the Agent. Replace the integration version with 1.0.1.

 datadog-agent integration install -t datadog-redisenterprise==<INTEGRATION_VERSION>

Step 3. Configure the Datadog configuration file

Copy the sample configuration and update the required sections to collect data from your Redis Enterprise cluster:

For Localhost Mode

The following minimal configuration should be added to the Enterprise Master node.

 sudo vim /etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml
 #################################################################
# Base configuration
init_config:

instances:
- host: localhost
username: user@example.com
password: secretPassword
port: 9443

Similarly, you need to edit the configuration file for the Enterprise Follower to add the following:

 sudo vim /etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml
  #################################################################
# Base configuration
init_config:

instances:
- host: localhost
username: user@example.com
password: secretPassword
port: 9443

For External Monitor Mode

The following configuration should be added to the Monitor node

#  Base configuration
init_config:

instances:
- host: cluster1.fqdn
username: user@example.com
password: secretPassword
port: 9443

- host: cluster2.fqdn
username: user@example.com
password: secretPassword
port: 9443

Step 4. Restart the Datadog Agent service

 sudo service datadog-agent restart

Step 5. Viewing the Datadog UI

Find the Redis Enterprise Integration under the Integration Menu:

alt_text

Displaying the host reporting data to Datadog:

alt_text

Listing the Redis Enterprise dashboards:

alt_text

Host details under Datadog Infrastructure list:

alt_text

Datadog dashboard displaying host metrics of the 1st host (CPU, Memory Usage, Load Average etc):

alt_text

Datadog dashboard displaying host metrics of the 2nd host:

alt_text

Step 6. Verifying the Datadog Agent Status

Running the datadog-agent command shows that the Redis Enterprise integration is working correctly.

 sudo datadog-agent status
 redisenterprise (1.0.1)
-----------------------
Instance ID: redisenterprise:ef4cd60aadac5744 [OK]
Configuration Source: file:/etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml
Total Runs: 2
Metric Samples: Last Run: 0, Total: 0
Events: Last Run: 0, Total: 0
Service Checks: Last Run: 0, Total: 0
Average Execution Time : 46ms
Last Execution Date : 2021-10-28 17:27:10 UTC (1635442030000)
Last Successful Execution Date : 2021-10-28 17:27:10 UTC (1635442030000)

Redis Enterprise Cluster Top View

alt_text

Let’s run a benchmark tool called memtier_benchmark to simulate an arbitrary number of clients connecting at the same time and performing actions on the server, measuring how long it takes for the requests to be completed.

 memtier_benchmark --server localhost -p 19701 -a password
[RUN #1] Preparing benchmark client...
[RUN #1] Launching threads now...

alt_text

This command instructs memtier_benchmark to connect to your Redis Enterprise database and generate load by doing the following:

  • Write objects only, no reads.
  • Each object is 500 bytes.
  • Each object has random data in the value.
  • Each key has a random pattern, then a colon, followed by a random pattern.

Run this command until it fills up your database to where you want it for testing. The easiest way to check is on the database metrics page.

 memtier_benchmark --server localhost -p 19701 -a Oracle9ias12# -R -n allkeys -d 500 --key-pattern=P:P --ratio=1:0
setting requests to 50001
[RUN #1] Preparing benchmark client...
[RUN #1] Launching threads now...

alt_text

The Datadog Events Stream shows an instant view of your infrastructure and services events to help you troubleshoot issues happening now or in the past. The event stream displays the most recent events generated by your infrastructure and the associated monitors, as shown in the diagram below.

alt_text

References:

- + \ No newline at end of file diff --git a/explore/import/index.html b/explore/import/index.html index de39617c3d..d4be8d9ea7 100644 --- a/explore/import/index.html +++ b/explore/import/index.html @@ -4,7 +4,7 @@ How to Import data into a Redis database | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to Import data into a Redis database


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Redis offers multiple ways to import data into a database: from a file, from a script, or from an existing Redis database.

Import using redis-cli script

  1. Create a simple file users.redis with all the commands you want to run

    HSET 'user:001' first_name 'John' last_name 'doe' dob '12-JUN-1970'
    HSET 'user:002' first_name 'David' last_name 'Bloom' dob '03-MAR-1981'
  2. Use the redis-cli tool to execute the script

    redis-cli -h localhost -p 6379 < users.redis

This approach will only run the commands and will not impact the existing data, except if you modify existing keys in the script.

Sample dataset: You can find sample datasets ready to be imported using this method in the https://github.com/redis-developer/redis-datasets repository.


Restore an RDB file

If you have an RDB file dump.rdb that contains the data you want, you can use this file to create a new database:

  1. Copy the dump.rdb file into the Redis working directory

    If you do not know which folder this is, run the command CONFIG GET dir against your running Redis instance (see the example after this list)

  2. Start the Redis service with the redis-server command

  3. The file dump.rdb is automatically imported.

  4. Connect to the database using redis-cli or any other client to check that the data has been imported (for example, with SCAN)
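
For example, locating the working directory on a running instance looks like this (the path shown is just an example):

 127.0.0.1:6379> CONFIG GET dir
1) "dir"
2) "/var/lib/redis"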

- Warning: Importing data erases all existing content in the database.


Import & Synchronize using RIOT

Redis Input/Output Tools (RIOT) is a set of import/export command line utilities for Redis:

  • RIOT DB: migrate from an RDBMS to Redis, Search, JSON, ...
  • RIOT File: bulk import/export data from/to files.
  • RIOT Gen: generate sample Redis datasets for new feature development and proof of concept.
  • RIOT Redis: live replication from any Redis database (including AWS Elasticache) to another Redis database.
  • RIOT Stream: import/export messages from/to Kafka topics.

Import data into Redis Enterprise

You can easily import data into Redis Enterprise and Redis Enterprise Cloud; take a look at the following documentation:

- + \ No newline at end of file diff --git a/explore/index.html b/explore/index.html index 4fb51b420f..2d74ac93a6 100644 --- a/explore/index.html +++ b/explore/index.html @@ -4,7 +4,7 @@ Explore Your Data | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Explore Your Data

The following links provide the available options to explore a new Redis database, either in the Cloud or using local software.

An out-of-the-box, predefined Grafana dashboard for Redis
A set of import/export command line utilities for Redis
- + \ No newline at end of file diff --git a/explore/redisdatasource/index.html b/explore/redisdatasource/index.html index a8d093220a..4be60fcbd6 100644 --- a/explore/redisdatasource/index.html +++ b/explore/redisdatasource/index.html @@ -4,7 +4,7 @@ How to add Redis as a datasource in Grafana and build customize dashboards for Analytics | The Home of Redis Developers - + @@ -16,7 +16,7 @@ In our case, we will be using redis-datasource.

 docker run -d -p 3000:3000 --name=grafana -e "GF_INSTALL_PLUGINS=redis-datasource" grafana/grafana

Step 3. Accessing the grafana dashboard

Open http://IP:3000 to access Grafana. The default username/password is admin/admin.

grafana

Step 4. Click "Configuration"

grafana

Step 5. Add Redis as a Data Source

grafana

Step 6. Select "Redis" as data source type

grafana

Step 7. Add Redis Database name, Endpoint URL and password

This assumes that you already have a Redis server and database up and running in your infrastructure. You can also leverage Redis Enterprise Cloud, as showcased in the example below.

grafana

Step 8. Click "Import" under Dashboard

grafana

Step 9. Access the Redis datasource Dashboard

grafana

Supported commands

The data source supports various Redis commands using custom components and provides a unified interface to query any command.

Query

Further References

- + \ No newline at end of file diff --git a/explore/riot/index.html b/explore/riot/index.html index f0757d8e42..8949e6bacb 100644 --- a/explore/riot/index.html +++ b/explore/riot/index.html @@ -4,7 +4,7 @@ RIOT | The Home of Redis Developers - + @@ -14,7 +14,7 @@

RIOT


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Redis Input/Output Tools (RIOT) is a set of import/export command line utilities for Redis:

  • RIOT Redis: live replication from any Redis database (including AWS Elasticache) to another Redis database.
  • RIOT DB: migrate from an RDBMS to Redis

Using RIOT Redis

Most database migration tools available today are offline in nature. Migrating data from AWS ElastiCache to Redis Enterprise Cloud, for example, means backing up your ElastiCache data to an AWS S3 bucket and importing it into Redis Enterprise Cloud using its UI. This implies some downtime and might result in data loss. Other available techniques include creating point-in-time snapshots of the source Redis server and applying the changes to the destination servers to keep both servers in sync. It might sound like a good approach, but it can be challenging when you have to maintain dozens of scripts to implement the migration strategy.

RIOT Redis is a migration tool that allows for seamless live replication between two Redis databases.

Step 1. Getting Started

Download the latest release and unzip the archive.

Launch the bin/riot-redis script and follow the usage information provided.

Step 2. Build and Run

git clone https://github.com/redis-developer/riot.git
cd riot/riot-redis
./riot-redis

Step 3. Install via Homebrew (macOS)

brew install jruaux/tap/riot-redis

Usage

❯ riot-redis
Usage: riot-redis [OPTIONS] [COMMAND]
--help Show this help message and exit.
-V, --version Print version information and exit.
-q, --quiet Log errors only
-d, --debug Log in debug mode (includes normal stacktrace)
-i, --info Set log level to info

You can use --help on any subcommand:

❯ riot-redis --help

❯ riot-redis import --help

❯ riot-redis import .. hset --help

Redis connection options are the same as redis-cli:

  -h, --hostname=<host>     Server hostname (default: 127.0.0.1)
-p, --port=<port> Server port (default: 6379)
-s, --socket=<socket> Server socket (overrides hostname and port)
--user=<username> Used to send ACL style 'AUTH username pass'. Needs password.
-a, --pass[=<password>] Password to use when connecting to the server
-u, --uri=<uri> Server URI
-o, --timeout=<sec> Redis command timeout (default: 60)
-n, --db=<int> Database number (default: 0)
-c, --cluster Enable cluster mode
-t, --tls Establish a secure TLS connection
-l, --latency Show latency metrics
-m, --pool=<int> Max pool connections (default: 8)

Redis URI syntax is described here.

Step 4. Example

Here is an example of live replication from a source Redis database (host source, port 6379) to a target Redis database (host target, port 6380):

❯ riot-redis -h source -p 6379 replicate --idle-timeout 500 -h target -p 6380 --live

Step 5. Verification

Once replication is complete, RIOT Redis performs a verification step to compare values and TTLs between the source and target databases. The output looks like this:

OK:1000 V:0 >:0 <:0 T:0
  • OK: # identical values

  • V: # mismatched values

  • >: # keys only present in source database

  • <: # keys only present in target database

  • T: # keys with TTL difference greater than tolerance

Step 6. Architecture

RIOT Redis implements client-side replication using a producer/consumer approach:

  • the producer is connected to the source Redis (e.g. ElastiCache) and iterates over keys to read their corresponding values

  • the consumer is connected to the target Redis (e.g. Redis Enterprise Cloud) and writes the key/value tuples previously created

  1. Key reader: initiates a SCAN and optionally calls SUBSCRIBE to listen for keyspace notifications (live replication).
  2. Value reader: takes the keys and calls DUMP and TTL.
  3. Key/Value writer: takes key/value/ttl tuples and calls RESTORE and EXPIRE.
note

Live replication makes use of keyspace notifications. Make sure the source Redis database has keyspace notifications enabled using notify-keyspace-events = KA in redis.conf or via CONFIG SET.
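
For example, on the source database:

 redis-cli -h SOURCE_HOST -p SOURCE_PORT CONFIG SET notify-keyspace-events KA
OK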

note

The live replication mechanism does not guarantee data consistency. Redis sends keyspace notifications over pub/sub which does not provide guaranteed delivery. It is possible that RIOT Redis can miss some notifications in case of network failures for example.

Using RIOT DB

RIOT DB lets you import/export data from relational databases.

Step 1. Getting Started

Download the latest release and unzip the archive.

Launch the bin/riot-db script and follow the usage information provided.

Step 2. Build and Run

❯ git clone https://github.com/redis-developer/riot.git
❯ cd riot/riot-db
❯ ./riot-db

Step 3. Install via Homebrew (macOS)

brew install jruaux/tap/riot-db

Step 4. Usage

❯ riot-db
Usage: riot-db [OPTIONS] [COMMAND]
--help Show this help message and exit.
-V, --version Print version information and exit.
-q, --quiet Log errors only
-d, --debug Log in debug mode (includes normal stacktrace)
-i, --info Set log level to info

You can use --help on any subcommand:

❯ riot-db --help
❯ riot-db import --help
❯ riot-db import … hset --help

Redis connection options are the same as redis-cli:

  -h, --hostname=<host>     Server hostname (default: 127.0.0.1)
-p, --port=<port> Server port (default: 6379)
-s, --socket=<socket> Server socket (overrides hostname and port)
--user=<username> Used to send ACL style 'AUTH username pass'. Needs password.
-a, --pass[=<password>] Password to use when connecting to the server
-u, --uri=<uri> Server URI
-o, --timeout=<sec> Redis command timeout (default: 60)
-n, --db=<int> Database number (default: 0)
-c, --cluster Enable cluster mode
-t, --tls Establish a secure TLS connection
-l, --latency Show latency metrics
-m, --pool=<int> Max pool connections (default: 8)

Step 5. Drivers

RIOT DB includes drivers for the most common RDBMSs:

Oracle

jdbc:oracle:thin:@myhost:1521:orcl

IBM Db2

jdbc:db2://host:port/database

MS SQL Server

jdbc:sqlserver://[serverName[\instanceName][:portNumber]][;property=value[;property=value]]

MySQL

jdbc:mysql://[host]:[port][/database][?properties]

PostgreSQL

jdbc:postgresql://host:port/database

SQLite

jdbc:sqlite:sqlite_database_file_path

For non-included databases you must install the corresponding JDBC driver under the lib directory and modify the RIOT DB CLASSPATH:

*nix: bin/riot-db → CLASSPATH=$APP_HOME/lib/myjdbc.jar:$APP_HOME/lib/…
Windows: bin\riot-db.bat → set CLASSPATH=%APP_HOME%\lib\myjdbc.jar;%APP_HOME%\lib\…

Step 6. Import

Use the import command to import the result set of a SQL statement.

Import from PostgreSQL

❯ riot-db -h localhost -p 6379 import "SELECT * FROM orders" --url jdbc:postgresql://host:port/database --username appuser --password passwd hset --keyspace order --keys order_id

You can specify one or many Redis commands as targets of the import:

Import into hashes

❯ riot-db import .. hset --keyspace blah --keys id

Import into hashes and set TTL on the key

❯ riot-db import .. hset --keyspace blah --keys id expire --keyspace blah --keys id

Import into hashes and set TTL and add to a set named myset

❯ riot-db import .. hset --keyspace blah --keys id expire --keyspace blah --keys id sadd --keyspace myset --members id

Step 7. Export

Export to PostgreSQL

❯ riot-db export "INSERT INTO mytable (id, field1, field2) VALUES (CAST(:id AS SMALLINT), :field1, :field2)" --url jdbc:postgresql://host:port/database --username appuser --password passwd --scan-match "hash:*" --key-regex "hash:(?<id>.*)"

Import from PostgreSQL to JSON strings

❯ riot-db -h localhost -p 6379 import "SELECT * FROM orders" --url jdbc:postgresql://host:port/database --username appuser --password passwd set --keyspace order --keys order_id

This will produce Redis strings that look like this:

{
"order_id": 10248,
"customer_id": "VINET",
"employee_id": 5,
"order_date": "1996-07-04",
"required_date": "1996-08-01",
"shipped_date": "1996-07-16",
"ship_via": 3,
"freight": 32.38,
"ship_name": "Vins et alcools Chevalier",
"ship_address": "59 rue de l'Abbaye",
"ship_city": "Reims",
"ship_postal_code": "51100",
"ship_country": "France"
}

Further References

- + \ No newline at end of file diff --git a/guide/security/how-to-use-ssl-tls-with-redis-enterprise/index.html b/guide/security/how-to-use-ssl-tls-with-redis-enterprise/index.html index aff95cc987..997a3f7677 100644 --- a/guide/security/how-to-use-ssl-tls-with-redis-enterprise/index.html +++ b/guide/security/how-to-use-ssl-tls-with-redis-enterprise/index.html @@ -4,7 +4,7 @@ How to Use SSL/TLS With Redis Enterprise | The Home of Redis Developers - + @@ -13,7 +13,7 @@

How to Use SSL/TLS With Redis Enterprise


Profile picture for Tugdual Grall
Author:
Tugdual Grall, Former Technical Marketing Manager at Redis

Header

In this article, I will explain how to secure your Redis databases using SSL (Secure Sockets Layer). In production, it is a good practice to use SSL to protect the data moving between various computers (client applications and Redis servers). Transport Layer Security (TLS) guarantees that only allowed applications/computers are connected to the database, and also that data is not viewed or altered by a man-in-the-middle process.

You can secure the connections between your client applications and Redis cluster using:

  • One-Way SSL: the client (your application) gets the certificate from the server (Redis cluster), validates it, and then all communications are encrypted
  • Two-Way SSL (aka mutual SSL): both the client and the server authenticate each other and validate that both ends are trusted.

In this article, I will focus on Two-Way SSL, using Redis Enterprise.

Prerequisites:

  • A Redis Enterprise 6.0.x database (my database is protected by the password secretdb01 and listening on port 12000)
  • redis-cli to run basic commands
  • Python, Node, and Java installed if you want to test various languages.

Simple Test

Let's make sure that the database is available:

redis-cli -p 12000 -a secretdb01 INFO SERVER

This should print the Server information.

1- Get the Certificate from Redis Cluster

Assuming you have access to the Redis Enterprise cluster, go to one of the nodes to retrieve the certificate (which is self-generated by default).

The cluster certificate is located at: /etc/opt/redislabs/proxy_cert.pem.

You have to copy it to each client machine. Note that once this is done, you could use this certificate to connect using "One-Way SSL", but that is not the purpose of this article.

In my demonstration I am using Docker and copying the certificate from my host using this command:

docker cp redis-node1:/etc/opt/redislabs/proxy_cert.pem ./certificates

2- Generate a New Client Certificate

With Two-Way SSL, you need a certificate for the client that the Redis database proxy will use to trust the client.

In this article I will use a self-signed certificate generated with OpenSSL; in this example, we are creating a certificate for an application named app_001.

You can create as many certificates as you want, or reuse this one for all servers/applications.

Open a terminal and run the following commands:


openssl req \
-nodes \
-newkey rsa:2048 \
-keyout client_key_app_001.pem \
-x509 \
-days 36500 \
-out client_cert_app_001.pem

This command generates a new client key (client_key_app_001.pem) and certificate (client_cert_app_001.pem) with no passphrase.
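
You can optionally inspect the generated certificate to confirm its subject and validity window before using it:

 openssl x509 -in client_cert_app_001.pem -noout -subject -dates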

3- Configure the Redis Database

The next step is to take the certificate and add it to the database you want to protect.

Let's copy the certificate and paste it into the Redis Enterprise Web Console.

Copy the certificate in your clipboard:

Mac:

pbcopy < client_cert_app_001.pem

Linux:

 xclip -sel clip < client_cert_app_001.pem

Windows:

clip < client_cert_app_001.pem

Go to the Redis Enterprise Admin Web Console and enable TLS on your database:

  1. Edit the database configuration
  2. Check TLS
  3. Select "Require TLS for All communications"
  4. Check "Enforce client authentication"
  5. Paste the certificate in the text area
  6. Click the Save button to save the certificate
  7. Click the Update button to save the configuration.

Security Configuration

The database is now protected, and it is mandatory to use the SSL certificate to connect to it.

redis-cli -p 12000 -a secretdb01 INFO SERVER
(error) ERR unencrypted connection is prohibited

4- Connect to the Database using the Certificate

In all following examples, I am using a "self-signed" certificate, so I do not check the validity of the hostname. You should adapt the connections/TLS information based on your certificate configuration.

4.1 Using Redis-CLI

To connect to a SSL protected database using redis-cli you have to use stunnel.

Create a stunnel.conf file with the following content:

cert = /path_to/certificates/client_cert_app_001.pem
key = /path_to/certificates/client_key_app_001.pem
cafile = /path_to/certificates/proxy_cert.pem
client = yes

[redislabs]
accept = 127.0.0.1:6380
connect = 127.0.0.1:12000

Start stunnel using the command

stunnel ./stunnel.conf

This will start a process that listens on port 6380 and acts as a proxy to the Redis Enterprise database on port 12000.

redis-cli -p 6380 -a secretdb01 INFO SERVER

4.2 Using Python

Using Python, you have to set the SSL connection parameters:

#!/usr/local/bin/python3

import redis
import pprint

try:
    r = redis.StrictRedis(
        password='secretdb01',
        decode_responses=True,
        host='localhost',
        port=12000,
        ssl=True,
        ssl_keyfile='./client_key_app_001.pem',
        ssl_certfile='./client_cert_app_001.pem',
        ssl_cert_reqs='required',
        ssl_ca_certs='./proxy_cert.pem',
    )

    info = r.info()
    pprint.pprint(info)

except Exception as err:
    print("Error connecting to Redis: {}".format(err))

4.3 Using Node.JS

For Node Redis, use the TLS library to configure the client connection:

import { createClient } from 'redis';
import fs from 'fs';

const ssl = {
  key: fs.readFileSync('../certificates/client_key_app_001.pem', {
    encoding: 'ascii',
  }),
  cert: fs.readFileSync('../certificates/client_cert_app_001.pem', {
    encoding: 'ascii',
  }),
  ca: [fs.readFileSync('../certificates/proxy_cert.pem', { encoding: 'ascii' })],
  // Self-signed certificate: skip hostname verification
  checkServerIdentity: () => {
    return null;
  },
};

const client = createClient({
  // replace with your connection string
  url: 'rediss://:secretdb01@localhost:12000',
  socket: {
    tls: true,
    key: ssl.key,
    cert: ssl.cert,
    ca: ssl.ca,
    checkServerIdentity: ssl.checkServerIdentity,
  },
});

await client.connect();

const reply = await client.info('SERVER');
console.log(reply);

More information in the documentation "Using Redis with Node.js".

4.4 Using Java

In Java, to be able to connect using SSL, you have to install all the certificates in the Java environment using the keytool utility.

Create a keystore file that stores the key and certificate you have created earlier:

openssl pkcs12 -export \
-in ./client_cert_app_001.pem \
-inkey ./client_key_app_001.pem \
-out client-keystore.p12 \
-name "APP_01_P12"

As you can see, the keystore is used to store the credentials associated with your client; it will be used later with the javax.net.ssl.keyStore system property in the Java application.

In addition to the keystore, you also have to create a trust store, which is used to store other credentials, in our case the Redis cluster certificate.

Create a trust store file and add the Redis cluster certificate to it:

keytool -genkey \
 -dname "cn=CLIENT_APP_01" \
 -alias truststorekey \
 -keyalg RSA \
 -keystore ./client-truststore.p12 \
 -keypass secret \
 -storepass secret \
 -storetype pkcs12

keytool -import \
 -keystore ./client-truststore.p12 \
 -file ./proxy_cert.pem \
 -alias redis-cluster-crt
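
To check that both entries are present, you can list the trust store contents; the truststorekey and redis-cluster-crt aliases created above should appear:

 keytool -list -keystore ./client-truststore.p12 -storepass secret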

The trust store will be used later with the javax.net.ssl.trustStore system property in the Java application.

You can now run the Java application with the following environment variables:

java -Djavax.net.ssl.keyStore=/path_to/certificates/java/client-keystore.p12 \
-Djavax.net.ssl.keyStorePassword=secret \
-Djavax.net.ssl.trustStore=/path_to/certificates/java/client-truststore.p12 \
-Djavax.net.ssl.trustStorePassword=secret \
-jar MyApp.jar

For this example and for simplicity, I will hard-code these properties in the Java code itself:


import redis.clients.jedis.Jedis;
import java.net.URI;

public class SSLTest {

    public static void main(String[] args) {

        System.setProperty("javax.net.ssl.keyStore", "/path_to/certificates/client-keystore.p12");
        System.setProperty("javax.net.ssl.keyStorePassword", "secret");

        System.setProperty("javax.net.ssl.trustStore", "/path_to/certificates/client-truststore.p12");
        System.setProperty("javax.net.ssl.trustStorePassword", "secret");

        URI uri = URI.create("rediss://127.0.0.1:12000");

        Jedis jedis = new Jedis(uri);
        jedis.auth("secretdb01");

        System.out.println(jedis.info("SERVER"));
        jedis.close();
    }

}
  • lines 8-12: the system properties are set to point to the keystore and trust store (this should be externalized)
  • line 14: the Redis URL starts with rediss (with two s's) to indicate that the connection should be encrypted
  • line 17: set the database password

More information in the documentation "Using Redis with Java".

Conclusion

In this article, you have learned how to:

  • retrieve the Redis Server certificate
  • generate a client certificate
  • protect your database to enforce transport layer security (TLS) with two-way authentication
  • connect to the database from redis-cli, Python, Node and Java
- + \ No newline at end of file diff --git a/guides/data-modeling/index.html b/guides/data-modeling/index.html index 26bcd27875..15c2240218 100644 --- a/guides/data-modeling/index.html +++ b/guides/data-modeling/index.html @@ -4,7 +4,7 @@ Data Modeling for Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Data Modeling for Redis

Introduction

- + \ No newline at end of file diff --git a/guides/import-data/index.html b/guides/import-data/index.html index 37347bf6da..09779512e4 100644 --- a/guides/import-data/index.html +++ b/guides/import-data/index.html @@ -4,7 +4,7 @@ Import Data into Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Import Data into Redis

Import data to the Cloud

Import using RIOT

$ riot -is -cool

- + \ No newline at end of file diff --git a/guides/import/database-migration-aws-elasticache-redis-enterprise-cloud/index.html b/guides/import/database-migration-aws-elasticache-redis-enterprise-cloud/index.html index f3092b6de7..29f705eac2 100644 --- a/guides/import/database-migration-aws-elasticache-redis-enterprise-cloud/index.html +++ b/guides/import/database-migration-aws-elasticache-redis-enterprise-cloud/index.html @@ -4,7 +4,7 @@ Online Database Migration from Amazon ElastiCache to Redis Enterprise Cloud using RIOT | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Online Database Migration from Amazon ElastiCache to Redis Enterprise Cloud using RIOT

Authors: Ajeet Singh Raina, Julien Ruaux

Most of the database migration tools available today are offline in nature. They are complex and require manual intervention.

If you want to migrate your data from Amazon ElastiCache to Redis Enterprise Cloud, for example, the usual process is to back up your ElastiCache data to an Amazon S3 bucket and then import your data using the Redis Enterprise Cloud UI. This process can require painful downtime and could result in data loss. Other available techniques include creating point-in-time snapshots of the source Redis server and applying the changes to the destination servers to keep both the servers in sync. That might sound like a good approach, but it can be challenging when you have to maintain dozens of scripts to implement the migration strategy.

So we’ve come up with a different approach:

Introducing RIOT

image

RIOT is an open source online migration tool built by Julien Ruaux, a Solution Architect at Redis. RIOT implements client-side replication using a producer/consumer approach. The producer is the combination of the key and value readers that have a connection to ElastiCache. The key reader component identifies keys to be replicated using scan and keyspace notifications. For each key, the value reader component performs a DUMP and hands the resulting key+bytes to the consumer (writer), which performs a RESTORE on the Redis Enterprise connection.

This blog post will show how to perform a seamless online migration of databases from ElastiCache to Redis Enterprise Cloud.

Prerequisites:

You will require a few resources to use the migration tool:

  • A Redis Enterprise Cloud subscription
  • Amazon ElastiCache (a primary endpoint in case of a single-master EC and a configuration endpoint in case of a clustered EC: Refer to Finding Connection Endpoints on the ElastiCache documentation to learn more)
  • An Amazon EC2 instance based on Linux

Step 1 - Setting up an Amazon EC2 instance

You can either create a new EC2 instance or leverage an existing one. In our example, we will first create an instance on Amazon Web Services (AWS). The most common scenario is to access an ElastiCache cluster from an Amazon EC2 instance in the same Amazon Virtual Private Cloud (Amazon VPC). We have used Ubuntu 16.04 LTS for this setup, but you can choose the Ubuntu or Debian distribution of your choice.

Use SSH to connect to this new EC2 instance from your computer as shown here:

ssh -i "public key" <AWS EC2 Instance>

Step 2 - Install the redis-cli tool

$ sudo apt update
$ sudo apt install -y redis-tools

Verify the connectivity with the ElastiCache database

Syntax:

$ redis-cli -h <Elasticache Primary Endpoint > -p 6379

Command:

$ sudo redis-cli -h <elasticache primary endpoint> -p 6379

Ensure that the above command allows you to connect to the remote Redis database successfully.
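
A successful connection answers a PING, for example:

 <elasticache primary endpoint>:6379> PING
PONG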

Step 3 - Using the RIOT migration tool

Run the commands below to set up the migration tool.

Prerequisites:

Install Java

We recommend using OpenJDK 11 or later:

sudo add-apt-repository ppa:openjdk-r/ppa && sudo apt-get update -q && sudo apt install -y openjdk-11-jdk

Installing RIOT

Unzip the package and make sure the RIOT binaries are in place, as shown here:

wget https://github.com/Redislabs-Solution-Architects/riot/releases/download/v2.0.8/riot-redis-2.0.8.zip
unzip riot-redis-2.0.8.zip
cd riot-redis-2.0.8/bin/

You can check the version of RIOT by running the command below:

./riot-redis --version
RIOT version "2.0.8"
bin/riot-redis --help
Usage: riot-redis [OPTIONS] [COMMAND]
-q, --quiet Log errors only
-d, --debug Log in debug mode (includes normal stacktrace)
-i, --info Set log level to info
-h, --help Show this help message and exit.
-V, --version Print version information and exit.
Redis connection options
-r, --redis=<uri> Redis connection string (default: redis://localhost:6379)
-c, --cluster Connect to a Redis Cluster
-m, --metrics Show metrics
-p, --pool=<int> Max pool connections (default: 8)
Commands:
replicate, r Replicate a source Redis database in a target Redis database
info, i Display INFO command output
latency, l Calculate latency stats
ping, p Execute PING command

Once Java and RIOT are installed, we are all set to begin the migration process with the command below, which replicates data directly from the source (ElastiCache) to the target (Redis Enterprise Cloud).

Step 4 - Migrate the data

Finally, it’s time to replicate the data from ElastiCache to Redis Enterprise Cloud by running the below command:

sudo ./riot-redis -r redis://<source Elasticache endpoint>:6379 replicate -r redis://password@<Redis Enterprise Cloud endpoint>:port --live

ElastiCache can be configured in two ways: clustered and non-clustered. In the chart below, the first row shows the command to run for the non-clustered scenario, while the second row shows the command for the clustered scenario with a specific database namespace:

As you can see, whenever you have a clustered ElastiCache, you need to pass the --cluster option before specifying the source ElastiCache endpoint.
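
For illustration, here is a sketch of both variants (endpoints, password, and port are placeholders):

 # Non-clustered ElastiCache source
 sudo ./riot-redis -r redis://<source ElastiCache primary endpoint>:6379 replicate -r redis://password@<Redis Enterprise Cloud endpoint>:port --live
 # Clustered ElastiCache source
 sudo ./riot-redis --cluster -r redis://<source ElastiCache configuration endpoint>:6379 replicate -r redis://password@<Redis Enterprise Cloud endpoint>:port --live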

Important notes

  • Perform user acceptance testing of the migration before using it in production.
  • Once the migration is complete, ensure that application traffic gets successfully redirected to the Redis Enterprise endpoint.
  • Perform the migration process during a period of low traffic to minimize the chance of data loss.

Conclusion

If you’re looking for a simple and easy-to-use live migration tool that can help you move data from Amazon ElastiCache to Redis Enterprise Cloud with no downtime, RIOT is a promising option.

- + \ No newline at end of file diff --git a/guides/import/index.html b/guides/import/index.html index 33d6ab50b6..5466345e52 100644 --- a/guides/import/index.html +++ b/guides/import/index.html @@ -4,7 +4,7 @@ Import Data into Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Import Data into Redis

Redis offers multiple ways to import data into a database: from a file, from a script, or from an existing Redis database.

Import using a redis-cli script

  1. Create a simple file users.redis with all the commands you want to run

    HSET 'user:001' first_name 'John' last_name 'doe' dob '12-JUN-1970'
    HSET 'user:002' first_name 'David' last_name 'Bloom' dob '03-MAR-1981'
  2. Use the redis-cli tool to execute the script

    redis-cli -h localhost -p 6379 < users.redis

This approach will only run the commands and will not otherwise impact existing data, except where the script modifies existing keys.

Sample dataset: You can find sample datasets ready to be imported using this method in the https://github.com/redis-developer/redis-datasets repository.


Restore an RDB file

If you have an RDB file dump.rdb that contains the data you want, you can use this file to create a new database:

  1. Copy the dump.rdb file into the Redis working directory

    If you do not know which directory this is, run the command CONFIG GET dir against your running Redis instance

  2. Start the Redis service with redis-server

  3. The file dump.rdb is automatically imported.

  4. Connect to the database using redis-cli or any other client to check that the data has been imported (for example, with SCAN). A command-line sketch follows the warning below.

- Warning: Importing data erases all existing content in the database.
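
A minimal sketch of those steps (the working directory and service name are assumptions for a typical Linux install):

 $ redis-cli CONFIG GET dir
 1) "dir"
 2) "/var/lib/redis"
 $ sudo systemctl stop redis-server         # stop first so shutdown does not overwrite the file
 $ sudo cp dump.rdb /var/lib/redis/dump.rdb
 $ sudo systemctl start redis-server
 $ redis-cli SCAN 0 COUNT 5                 # spot-check that keys are present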


Import & Synchronize using RIOT

Redis Input/Output Tools (RIOT) is a set of import/export command line utilities for Redis:

  • RIOT DB: migrate from an RDBMS to Redis, Search, JSON, ...
  • RIOT File: bulk import/export data from/to files.
  • RIOT Gen: generate sample Redis datasets for new feature development and proof of concept.
  • RIOT Redis: live replication from any Redis database (including AWS Elasticache) to another Redis database.
  • RIOT Stream: import/export messages from/to Kafka topics.

Import data into Redis Enterprise

You can easily import data into Redis Enterprise and Redis Enterprise Cloud; take a look at the following documentation:

- + \ No newline at end of file diff --git a/guides/index.html b/guides/index.html index c379a7d4a6..4bf3693320 100644 --- a/guides/index.html +++ b/guides/index.html @@ -4,7 +4,7 @@ Guides | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/guides/indexing/index.html b/guides/indexing/index.html index 0e27731f02..c4e6ddd1e4 100644 --- a/guides/indexing/index.html +++ b/guides/indexing/index.html @@ -4,7 +4,7 @@ Indexing and Querying | The Home of Redis Developers - + @@ -13,7 +13,7 @@

Indexing and Querying

Introduction

Conceptually, Redis is based on the key-value database paradigm. Every piece of data is associated with a key, either directly or indirectly. If you want to retrieve data based on anything besides the key, you’ll need to implement an index that leverages one of the many data types available in Redis.

There are various ways to create an index using Redis core data structures, for example:

  • Sorted sets to create secondary indexes by ID or other numerical fields.
  • Sorted sets with lexicographical ranges for creating more advanced secondary indexes, composite indexes and graph traversal indexes.
  • Sets for creating random indexes.
  • Lists for creating simple iterable indexes and last N items indexes.

When using these data structures, you must build your own logic to keep the indexes up to date. To simplify and automate this task, Redis provides Search for indexing and querying.
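
For example, a minimal sketch of a hand-rolled secondary index on age using a sorted set (key names are illustrative):

 ZADD user:index:age 27 user:1002
 ZADD user:index:age 34 user:1001
 ZRANGEBYSCORE user:index:age 30 40    # returns the IDs of users aged 30 to 40

Every time a user's age changes, your application must update user:index:age as well; this is exactly the bookkeeping that an indexing engine can automate.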

The easiest way to index and query data in Redis is to use the Redis Search module.

You can follow the Redis Search Tutorial to learn more about it and look at the following video from Redis University:

Querying, Indexing, and Full-text Search in Redis

If you have questions about Redis Search and other modules, ask them in the Redis Community Forum.

- + \ No newline at end of file diff --git a/guides/security/index.html b/guides/security/index.html index 8809f7255d..f9bc1d6475 100644 --- a/guides/security/index.html +++ b/guides/security/index.html @@ -4,7 +4,7 @@ Redis Security | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/hacktoberfest/index.html b/hacktoberfest/index.html index d4d6bd6d66..c07fc475a6 100644 --- a/hacktoberfest/index.html +++ b/hacktoberfest/index.html @@ -4,7 +4,7 @@ Hacktoberfest 2021 at Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Hacktoberfest 2021 at Redis


Profile picture for Suze Shardlow
Author:
Suze Shardlow, Developer Community Manager at Redis
Profile picture for Simon Prickett
Author:
Simon Prickett, Principal Developer Advocate at Redis

(Looking for Hacktoberfest 2022? Find us over at redis.io!)

Hacktoberfest is a month-long online festival which takes place every year in October. It is sponsored by DigitalOcean and aims to encourage people to get involved in open source projects. Hacktoberfest 2021 has now finished! We've left the below information here so you can see how it worked and check out the recordings of our live streams.

How to get involved

We've created a number of GitHub issues for folks who want to contribute to our documentation and demo apps. View our list of open issues.

Get a GitHub account and Hacktoberfest account

You'll need a GitHub account to contribute to our repos. Sign up for free at GitHub.com.

You'll also need to register with Hacktoberfest using your GitHub account if you want to be in with a chance of earning swag from DigitalOcean. Please note that Redis is not involved in allocating or sending swag.

Finding and working on an issue

  1. Look for a suitable issue on GitHub. Where possible, we have tagged them according to the skillset and level of experience required.

  2. Read the guidance notes on each issue carefully so you know what's expected of you.

  3. Add a comment in the issue stating that you're working on it. To be fair to other contributors, only claim one issue at a time.

    Example Issue

  4. Open a pull request within two calendar days:

    • This is to give more people a fair chance at finding an unclaimed issue.
    • Make sure you reference the issue number in your pull request so that it shows on the issue's page.
    • If you include your Twitter handle, we will give you a shout out.
    • If you're a member of our Discord server, include your Discord handle and we will bestow the Hacktoberfest 2021 role upon you.

    Example Pull Request

    When you do this, your pull request will then be automatically referenced in the issue:

    Example Issue with Pull Request

    • If you don't submit a pull request within two calendar days, we will make the issue available to other contributors.
  5. We will review your pull request. If it's suitable, we'll merge it and add the hacktoberfest-accepted label. If we feel that further work is required, we'll comment as part of our review.

Read DigitalOcean's complete Hacktoberfest rules here.

Join the conversation

Need help with one of our issues, or just want to chat with other contributors? Join us on Discord!

Looking for more repos?

If you're looking for more repos to contribute to during Hacktoberfest, check out the Hacktoberfest topic on GitHub. Redis is not responsible for the content of third party repositories.

Learn more

Documentation is often cited as a great way to get your feet wet with open source. So to demystify the world of technical writing, we have hosted four live events with our documentation team. Suze Shardlow, Developer Community Manager, sat down with Technical Writers Kaitlyn Michael, Rachel Elledge and Lance Leonard for a series of fireside chats.

Fireside panel - Suze Shardlow with the documentation team: Technical Writing Explained

Fireside 1:1 - Suze Shardlow with Kaitlyn Michael: Technical Writing Explained

Fireside 1:1 - Suze Shardlow with Rachel Elledge: Technical Writing Explained

Fireside 1:1 - Suze Shardlow with Lance Leonard: Technical Writing Explained

Contact us

Hacktoberfest at Redis is brought to you by Suze Shardlow and Simon Prickett of the Redis Developer Relations team. Contact us if you have any questions that aren't addressed here. Please note that we are available during UK daytime.

We can't debug or refactor your code for you, but if you need help understanding how the project works, write a post in the Hacktoberfest channel on our Discord server.

- + \ No newline at end of file diff --git a/hacktoberfest/stories/lara-aasem/index.html b/hacktoberfest/stories/lara-aasem/index.html index 123daf288b..2e9cb68791 100644 --- a/hacktoberfest/stories/lara-aasem/index.html +++ b/hacktoberfest/stories/lara-aasem/index.html @@ -4,7 +4,7 @@ Hacktoberfest Stories: Opening the source of open source | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Hacktoberfest Stories: Opening the source of open source

Preface by Suze Shardlow, Developer Community Manager at Redis:

Lara Aasem is a backend software engineer based in Cairo, Egypt. For Hacktoberfest, she contributed an enhancement to Kaboom, which is one of our demo apps. This is her story.

Open source has always simultaneously fascinated and daunted me. As a backend engineer with a background in sociology, the fact that our world is powered by software built out of empathy, a desire to improve our collective experience, and genuine love of craft—rather than profit—seemed upliftingly radical to me. The open source community's codes of conduct, care for all involved, and emphasis on learning drew me in, but I was still intimidated by it. How do I even start contributing significantly to projects I know nothing about?

It was there that Hacktoberfest found me. It was 2020 and I was frustrated after a drawn-out attempt to make a sizable contribution to a project went awry because I could never find the time to properly address the review comments for a change of that size. After hearing about the event from coworkers, I realized there was a space in which I could make small, beginner-friendly yet meaningful contributions. While exploring unfamiliar codebases and using languages I may not be super comfortable with was challenging, it was also rewarding, especially with the support of maintainers and the knowledge that I was contributing to building the kind of world I dream about.

Finding...

My first experience with Hacktoberfest was so fulfilling, I spent all of 2021 excited for October. When the time came, I once again used www.goodfirstissues.com, an aggregator that lists GitHub issues labeled as goodfirstissue, with the ability to filter by other labels as well as the programming language and name of the repository housing the issue. My criteria when searching for issues were:

  • small and well-documented enough to be worked on in a few hours,
  • in a language I know but am not necessarily proficient in, and
  • exciting because of the nature of the project, the learning opportunities it offers, or (ideally) both.

This is how I came across an issue by Redis to implement an API validation for a Redis RPG game example built with Kaboom.JS.

Screen grab of the issue

It fit all my criteria:

  • As a straightforward change, it would only take a few hours to become familiar with the project, run it, implement the validation, and address any review comments, especially since the project was well-documented and the issue description explained exactly what was required and how to seek help from the maintainers.
  • It was in Node.js, a framework I'm very familiar with.
  • I was excited to learn more about Redis and contribute to a repository that helped other developers learn more about it.

... enjoying...

This last point made the issue all the more enjoyable to work on. I have a high-level understanding of how Redis works and have used it before with Ruby on Rails via redis-rb, a Ruby client library. It was exciting to try a Node.js client instead (ioredis) and to be exposed to Redis JSON, going through its docs to find the most suitable command to use for this particular issue. It was also helpful to see another contributor suggest improvements to my implementation in their own pull request (PR) implementing validation for another API.

... and working on the issue

1. Finding out how to contribute

Different projects have different guidelines for contributing. These may be outlined in the README.md of the project's GitHub repo, in a separate CONTRIBUTING.md file in the repo's base directory, or in a guide on the project or organization's website, the latter being the case with Redis as explained in the issue description. The Redis Hacktoberfest guide asked contributors to comment on the issue they're working on and to only claim one at a time out of fairness, which seems to be standard procedure across many open source repos.

Screen grab of Lara asking to be assigned

2. Running the project

After quickly combing through the README.md, my next step was to run the project to get a better sense of how it worked. At the time, the project structure was that you could run Redis via Docker but you had to run the API server locally (this has since been addressed via another issue), so this is what I did. I also made sure to load the sample data as instructed in the Setup section of the README.md (and after, if I remember correctly, a few errors).

Screen grab of the Redis Kaboom server listening on port 8080

3. Trying out the API

The API in question is a GET request that fetches the data for a room given the game ID.

Screen grab of Postman

4. Implementing the validation

If this API was called with an invalid room number, the server would fail with a 500 HTTP status code. The issue was to explicitly validate the room number, returning a more meaningful 400 HTTP status code and response body to the client.

After combing through the sample data I had loaded previously via npm run load and finding out that the room data was persisted as an array of JSON objects, I assumed the minimum room number would be 0. To get the maximum, then, I would need to get the last index in the array by getting the array length and subtracting one from it. For this, I used the JSON.ARRLEN Redis JSON command, validating that the room number sent in the request path was within range and returning 400 otherwise.
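
In sketch form, the whole check hinges on one Redis JSON call (key name and reply are illustrative):

 JSON.ARRLEN game:rooms .
 (integer) 24          # so valid room numbers are 0 through 23

Any requested room number outside that range now gets a 400 response instead of taking the server down.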

Screen grab of the file changes in GitHub

5. Testing

Always a beautiful moment:

Screen grab of Postman showing an invalid room number

6. Opening a PR

Once I was satisfied with the functionality and quality of the code, I pushed my changes to a fork of the upstream repo and opened a PR. I simply linked the issue number in the PR description as there was no required template to follow and there wasn't much else to note regarding the implementation.

Screen grab of Lara&#39;s pull request

Post-merge-um

On checking my PR to see that it was reviewed, approved, and merged (and to revel in the beauty of all those hacktoberfest-accepted, Merged, and Closed labels), I noticed another contributor had referenced my PR in their own. They had some good comments on a corner case I had missed as well as the format of the response I was sending.

Screen grab of another contributor&#39;s PR

A quarter of the way into Hacktoberfest 2021 and I had already learned a lot, engaged with other members of the open source community, and had a good time doing it. While finding and contributing to suitable open source issues could still be challenging at times, it was no longer the seemingly impossible task it used to be.

- + \ No newline at end of file diff --git a/hacktoberfest/stories/vincent-aceto/index.html b/hacktoberfest/stories/vincent-aceto/index.html index 42e933d51b..af0ca96936 100644 --- a/hacktoberfest/stories/vincent-aceto/index.html +++ b/hacktoberfest/stories/vincent-aceto/index.html @@ -4,7 +4,7 @@ Hacktoberfest Stories: A Hacktoberfest Composition: Redis and Docker | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Hacktoberfest Stories: A Hacktoberfest Composition: Redis and Docker

Hello! My name's Vincent Aceto and I am a Software Engineer based in New York City. Throughout the week, you can find me hacking away on mobile and TV applications over at Equinox Media. On the weekends, when I'm not getting lost somewhere on my skateboard, I'll be nose deep in some open source or personal projects.

October is a special month for those who enjoy working on exciting software projects. In fact, there couldn’t be a more perfect month to get into software exploration and contribution; it's collectively known as Hacktoberfest! Hacktoberfest is a community-led effort to encourage open source contributing and foster learning. I am a huge advocate for open source, so getting involved in Hacktoberfest is such a joy; if I have the chance to learn something new or brush up on some skills, then definitely count me in.

Now, rewind the clock a bit, and you'd find me perusing Github's Hacktoberfest-tagged issues. I wanted to find the best first contribution for the month's coding festivities. While searching, I had one very important criterion that the introductory issue needed to satisfy: to work with a technology that I do not use on a daily basis. I wanted to make sure that I walked away with a newfound knowledge that would benefit my career. After some time, my eyes landed on a Redis Developer Community issue - I knew it immediately, this was perfect! The checkbox was ticked, for I do not regularly work with Redis. I was now ready to kick off the Hacktoberfest celebration.

The project I worked on is entitled Introducing The Geosearch Command. The goal of the project is to demonstrate the use of the GEOSEARCH command, which was added to Redis in the recent 6.2 release. Working as a software engineer, you are almost always going to be working with some cached data and, more often than not, it's Redis that is sitting nicely somewhere in that cache layer. That said, my first-hand experience (at the time) with the caching technology resonated somewhere between “landing page” and “getting started”. The project turned out to be a developer sale, a two-for-one: I would get to learn more about the Redis technology, how to set up an instance, familiarize myself with the API, and I would get the opportunity to work with Docker - which I'm not regularly hacking with during my day-to-day.

Now, onto the issue. The issue's aim was to extend an existing Docker Compose integration. The docker-compose.yml file was to include a schema, which was to run the repository's Python Flask application in a Docker container. Additionally, the main application was to connect to the project's existing Redis container - this Redis build step was already included in the Docker Compose file. With the features and constraints clearly defined, the next step was to pull out the documentation. To make sure I was familiar with the tech at hand, and to ensure I got the most out of working on the issue, I started with the Redis installation docs - becoming aware that things like the default Redis port 6379 would come to serve me well when debugging. After installation, I took some time to explore the Redis API and read about Redis' internal hash implementation at a high level. The final reconnaissance was to review Docker. I had briefly used Docker at a previous position, and have worked on some personal projects using the container technology; however, a quick Dockerfile and docker-compose.yml refresher was necessary.

With the pre-work done, it was time to start the Flask application's Docker Compose implementation. Here is a step-by-step guide, expressed in the present tense, to the process:

First, let's start with the Docker Compose YAML file:

Screen grab of the YAML file

As you can see, we have some Redis provisioning steps. We assign a name to the container, define the version of Redis we wish to spin up, and the port mapping (6379:6379 states we'd like to expose port 6379 from inside the container to the same port on your local machine).
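
A minimal sketch of such a Redis service definition (the image tag and names are assumptions, not the project's exact file):

 version: "3"
 services:
   redis:
     container_name: redis
     image: "redis:6.2"
     ports:
       - "6379:6379"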

Now, let's start with composing the project's application. Unlike the Redis container, which uses an official Docker image to build from, we don't have a blueprint to scaffold the project's application. This blueprint, or schema, is called a Dockerfile. A Dockerfile lists steps on how to build our image. It's this very image that tells Docker's engine how to build the container. Let's create a Dockerfile, which will assemble the application image for us:

Screen grab of the Dockerfile

In short, this file serves as the foundation for the construction of the project's application environment. Additionally, the file tells Docker which files we want to include in our container, how to install the contained app's dependencies and what command should be used to run the app. Most of the file's instructions are better explained in the official documentation, so please take a look there if you're curious as to what the file's instructions have to offer.
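
As a rough sketch (file names, versions, and the port are assumptions, not the project's exact Dockerfile):

 FROM python:3.8-slim
 WORKDIR /app
 # Install the app's dependencies first so Docker can cache this layer.
 COPY requirements.txt .
 RUN pip install -r requirements.txt
 # Include the application files in the container.
 COPY . .
 ENV FLASK_APP=app.py
 EXPOSE 5000
 # Command used to run the app (the --host fix comes later in the story).
 CMD ["flask", "run"]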

Great, before we move on to the compose file, let's make sure we test that Docker is able to build and run the container from our image.

Let's build the image:

Screen grab of the image build

Get our newly created image's hash identifier by listing our local images:

Screen grab of the list of local images

Now let’s run the container using the image id, while making sure we bind a port on our machine to the exposed port defined in the Dockerfile:

Screen grab of the port binding

Great! The logs indicate the container is running. Let's ensure our port mapping is working. A quick cURL command verifies that we can talk to the application:

Screen grab of the cURL command
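
For reference, a sketch of the commands behind those screen grabs (the image name and ports are illustrative):

 $ docker build -t kaboom-app .
 $ docker images                            # note the new image's id
 $ docker run -p 5000:5000 <image id>       # bind a host port to the exposed port
 $ curl http://localhost:5000               # verify the app answers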

With the Flask application Docker-fied™, let's compose it with Redis!

Screen grab of the Redis composition

Let us quickly dissect what was added to the docker-compose.yml (a sketch follows the list):

  1. Define a service for the application (namespaced under 'app')
  2. Define a name for the container
  3. Set a build context/entry point (this is the relative location for our service's Dockerfile)
  4. Map the service's port to the host machine
  5. Ensure that Redis is initialized before the Flask app starts (since the Flask application requires a Redis connection on init)
  6. Define the necessary environment variables.
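
A sketch of what that app service might look like (all values are assumptions, not the project's exact file):

 services:
   app:
     container_name: kaboom-app
     build: .                  # build context pointing at the app's Dockerfile
     ports:
       - "5000:5000"
     depends_on:
       - redis                 # initialize Redis before the Flask app starts
     environment:
       - REDIS_HOST=redis
       - REDIS_PORT=6379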

With the scaffolding in order, it's now time to run both the Flask application and Redis with Docker Compose. To do so, we'll run the command docker-compose up:

Screen grab of the docker-compose-up command

Finally, let's navigate to localhost:5000 in our browser to see the application in action:

Screen grab showing localhost:5000

Excellent, the Flask application is running and is composed with the pre-existing Redis integration!

Now, before I conclude, I'd be remiss if I said that things worked as smoothly as portrayed; however, we welcome such hiccups and challenges. The main problem I faced was an empty response from the contained application server. What could be the issue? The Dockerfile, for the Flask app, is working. The compose file seemingly provisions our services successfully. What could be the problem here? Welp, it turns out I forgot a very important factoid: Docker Compose will set up a single default network, which will house the services defined in the yaml file. Containers and their services can communicate within this network, but what about our browser, which is not on that Docker network?

To resolve this issue, we need to tell our contained application server that it should listen on all networks, not just localhost; which, in the context of our running Docker network, is local only to that micro-network, if you will. To tell the Flask server to listen on all accessible networks, we can define our host in the Dockerfile's CMD command:

Screen grab showing the CMD command
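
In sketch form, the fix is a one-line change to the Dockerfile's CMD (standard Flask flags; the exact file may differ):

 CMD ["flask", "run", "--host=0.0.0.0"]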

All good!

Working through this issue, I definitely picked up some newfound Redis knowledge! While not 100% necessary for the task at hand, starting with the official documentation and exploring the API provided me with the confidence needed to tackle this issue. Additionally, the project allowed me to solidify some pre-existing Docker knowledge; and, very politely, pointed out which knowledge gaps needed to be filled.

Working through this Hacktoberfest-inspired issue was very rewarding, and I can say that I have walked away a better developer. Not only was I exposed to more technology, and got to flex some problem-solving muscles, but my passion for open-source software collaboration has grown evermore.

Thank you for reading! I hope my story inspires you to start with (or continue) working with open source.


You can find Vincent online at his website and at LinkedIn.

- + \ No newline at end of file diff --git a/howtos/analytics/index.html b/howtos/analytics/index.html index 47464a208e..fa8d4c8f42 100644 --- a/howtos/analytics/index.html +++ b/howtos/analytics/index.html @@ -4,7 +4,7 @@ Building an Analytics dashboard app using Redis | The Home of Redis Developers - + @@ -26,7 +26,7 @@ Example:

 GET rab:count:action:addToCart:timeSpan:2015-12/1
  • Shares of products bought ({productPage} is one of product1, product2, product3):

December: GET rab:count:action:buy:page:{productPage}:timeSpan:2015-12 Example:

 GET rab:count:action:buy:page:product3:timeSpan:2015-12
  • X week of December: GET rab:count:action:buy:page:{productPage}:timeSpan:2015-12/{X} Example:

     GET rab:count:action:buy:page:product1:timeSpan:2015-12/2

Customer and Cohort Analysis

  • People who registered: BITCOUNT rab:bitmap:action:register:timeSpan:2015-12
  • People who register then bought (order matters): BITCOUNT rab:bitmap:custom:cohort-buy:timeSpan:2015-12
  • Dropoff: (People who register then bought / People who register) * 100 [%]
  • Customers who bought only specified product ({productPage} is one of: product1, product2, product3):
SMEMBERS rab:set:action:buy:page:{productPage}:timeSpan:2015-12

Example:

 SMEMBERS rab:set:action:buy:page:product2:timeSpan:2015-12
  • Customers who bought Product1 and Product2:
SINTER rab:set:action:buy:page:product1:timeSpan:anytime rab:set:action:buy:page:product2:timeSpan:anytime
  • Customer Retention (customers who bought on the different dates): SMEMBERS rab:set:custom:retention-buy:timeSpan:anytime

References

- + \ No newline at end of file diff --git a/howtos/antipatterns/index.html b/howtos/antipatterns/index.html index 63a4afeb03..b9f74231b4 100644 --- a/howtos/antipatterns/index.html +++ b/howtos/antipatterns/index.html @@ -4,7 +4,7 @@ Redis Anti-Patterns Every Developer Should Avoid | The Home of Redis Developers - + @@ -13,7 +13,7 @@

Redis Anti-Patterns Every Developer Should Avoid


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

antipattern

Developers don’t just use Redis, they love it. Stack Overflow’s annual Developer Survey 2021 ranked Redis as the Most Loved Database platform for the fifth year running! But it is equally important to understand that Redis defaults are not the best for everyone. Millions of developers use Redis for its speed and performance; however, it is important to make sure it is being used properly.

"Antipatterns" basically refers to those practices and solutions that might seem to be a good fit initially but when it comes to implementation phase, it makes your code much more complex. Let us look at the top Redis anti-patterns to avoid:

1. Large databases running on a single shard/Redis instance

With large databases running on a single shard/Redis instance, failover, backup, and recovery will all take longer. Hence, it’s always recommended to keep shards to the recommended sizes. A general conservative rule of thumb is 25 GB or 25K ops/second per shard.

Redis Enterprise recommends sharding if you have more than 25 GB of data or a high number of operations. If you are above 25,000 operations per second, sharding can improve performance; with fewer operations per second, a single shard can handle up to 50 GB of data.

Example #1 - redis-py

Let us look at redis-py, which uses a connection pool to manage connections to a Redis server. By default, each Redis instance you create will in turn create its own connection pool. You can override this behavior and use an existing connection pool by passing an already created connection pool instance to the connection_pool argument of the Redis class. You may choose to do this in order to implement client-side sharding or to have fine-grained control of how connections are managed.

 >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
>>> r = redis.Redis(connection_pool=pool)

2. Connecting directly to Redis instances

With a large number of clients, a reconnect flood can overwhelm a single-threaded Redis process and force a failover. Hence, it is recommended to use the right tooling to reduce the number of open connections to your Redis server.

The Redis Enterprise DMC proxy allows you to reduce the number of connections to your cache server by acting as a proxy. There are also third-party tools like Twemproxy, a fast and lightweight proxy server that allows you to reduce the number of open connections to your Redis server. It was built primarily to reduce the number of connections to the caching servers on the backend. This, together with protocol pipelining and sharding, enables you to horizontally scale your distributed caching architecture.

3. More than one secondary shard (Redis OSS)

Redis OSS uses a shard-based quorum. It's advised to use at least 3 copies of the data (2 replica shards per master shard) in order to be protected from split-brain situations. In a nutshell, Redis OSS solves the quorum challenge by having an odd number of shards (primary + 2 replicas).

Redis Enterprise solves the quorum challenge with an odd number of nodes. Redis Enterprise avoids a split-brain situation with only 2 copies of the data, which is more cost-efficient. In addition, the so-called ‘quorum-only node' can be used to bring a cluster up to an odd number of nodes if an additional, not necessary data node would be too expensive.

4. Performing single operation

Performing several operations serially increases connection overhead. Instead, use Redis Pipelining. Pipelining is the process of sending multiple messages down the pipe without waiting on the reply from each - and (typically) processing the replies later when they come in.

Pipelining is entirely a client-side implementation. It is aimed at solving response latency issues in high network latency environments, so the less time spent over the network sending commands and reading responses, the better. This is effectively achieved by buffering: the client may (or may not) buffer the commands at the TCP stack before they are sent to the server. Once they are sent to the server, the server executes them and buffers the replies on the server side. The benefit of pipelining is drastically improved protocol performance: the speedup gained by pipelining ranges from a factor of five for connections to localhost up to a factor of at least one hundred over slower internet connections.
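
As a minimal redis-py sketch of the same idea (key names are illustrative):

 import redis

 r = redis.Redis(host='localhost', port=6379, db=0)
 pipe = r.pipeline()
 # Commands are buffered client-side and sent in a single round trip.
 pipe.set('user:1:visits', 1)
 pipe.incr('user:1:visits')
 pipe.get('user:1:visits')
 results = pipe.execute()  # [True, 2, b'2']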

5. Caching keys without TTL

Redis functions primarily as a key-value store. It is possible to set timeout values on these keys, and a timeout expiration automatically deletes the key. Additionally, commands that delete or overwrite the contents of a key will clear its timeout. The Redis TTL command is used to get the remaining time of the key expiry in seconds; it returns the remaining time to live of a key that has a timeout. This introspection capability allows a Redis client to check how many seconds a given key will continue to be part of the dataset. Without TTLs, keys accumulate and end up being evicted. Hence, it is recommended to set TTLs on all caching keys.
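
For example, setting and inspecting a TTL from redis-cli (key and value are illustrative):

 SET session:6fa3 "payload" EX 3600    # expire in one hour
 TTL session:6fa3                      # remaining seconds, e.g. (integer) 3597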

6. Endless Redis Replication Loop

When attempting to replicate a very large active database over a slow or saturated link, replication never finishes due to the continuous updates. Hence, it is recommended to tune the slave and client buffers to allow for slower replication. Check out this detailed blog.

7. Hot Keys

Redis can easily become the core of your app’s operational data, holding valuable and frequently accessed information. However, if you centralize the access down to a few pieces of data accessed constantly, you create what is known as a hot-key problem. In a Redis cluster, the key is actually what determines where in the cluster that data is stored. The data is stored in one single, primary location based off of hashing that key. So, when you access a single key over and over again, you’re actually accessing a single node/shard over and over again. Let’s put it another way—if you have a cluster of 99 nodes and you have a single key that gets a million requests in a second, all million of those requests will be going to a single node, not spread across the other 98 nodes.

Redis even provides tools to find where your hot keys are located. Use redis-cli with the --hotkeys argument alongside any other arguments you need to connect:

 $ redis-cli --hotkeys

When possible, the best defence is to avoid the development pattern that creates the situation. Writing the data to multiple keys that reside in different shards will allow you to access the same data more frequently. In a nutshell, avoid specific keys that are accessed with every client operation. It's recommended to shard out hot keys using hashing algorithms. You can also set the eviction policy to LFU and run redis-cli --hotkeys to determine which keys are hot.

8. Using Keys command

In Redis, the KEYS command can be used to perform exhaustive pattern matching on all stored keys. This is not advisable, as running this on an instance with a large number of keys could take a long time to complete, and will slow down the Redis instance in the process. In the relational world, this is equivalent to running an unbound query (SELECT...FROM without a WHERE clause). Execute this type of operation with care, and take necessary measures to ensure that your tenants are not performing a KEYS operation from within their application code. Use SCAN, which spreads the iteration over many calls, not tying up your whole server at one time.

Scanning the keyspace by key name is an extremely slow operation and runs in O(N), with N being the number of keys. It is recommended to use Redis Search to return information based on the contents of the data instead of iterating through the key space.

 FT.SEARCH orders "@make: ford @model: explorer"
SQL: SELECT * FROM orders WHERE make=ford AND model=explorer

9. Running Ephemeral Redis as a primary database

Redis is often used as a primary storage engine for applications. Unlike using Redis as a cache, using Redis as a primary database requires two extra features to be effective. Any primary database should really be highly available. If a cache goes down, then generally your application is in a brown-out state. If a primary database goes down, your application also goes down. Similarly, if a cache goes down and you restart it empty, that’s no big deal. For a primary database, though, that’s a huge deal. Redis can handle these situations easily, but they generally require a different configuration than running as a cache. Redis as a primary database is great, but you’ve got to support it by turning on the right features.

With Redis open source, you need to set up Redis Sentinel for high availability. In Redis Enterprise, it’s a core feature that you just need to turn on when creating the database. As for durability, both Redis Enterprise and open source Redis provide durability through AOF or snapshotting so your instance(s) start back up the way you left them.

10. Storing JSON blobs in a string

Microservices written in several languages may not marshal/unmarshal JSON in a consistent manner. Application logic will be required to lock/watch a key for atomic updates, and JSON manipulation is often a very compute-costly operation. Hence, it is recommended to use the HASH data structure or Redis JSON.
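
A quick sketch of both recommended alternatives (keys and fields are illustrative):

 HSET user:1001 name "Alice" city "Cairo"               # field-level reads and writes
 JSON.SET user:1002 . '{"name":"Bob","city":"Tunis"}'
 JSON.SET user:1002 .city '"Cairo"'                     # atomic partial update, no read-modify-write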

11. Translating a table or JSON to a HASH without considering query pattern

The only query mechanism is a SCAN, which requires reading the data structure and limits filtering to the MATCH directive. It is recommended to store the table or JSON as a string. Break out the indexes into reverse indexes using a SET or SORTED SET and point back to the key for the string.

12. Using the SELECT command and multiple databases inside one Redis instance

The usage of SELECT and multiple databases inside one Redis instance was mentioned as an anti-pattern by Salvatore (the creator of Redis). It is recommended to use a dedicated Redis instance for each database need. This is especially true in microservice architectures where client applications might step on each other's toes (noisy neighbor, database setup/teardown impact, maintenance, upgrade, ...)

13. Using the Redis Time Series module when the only query is by order

The Redis Time Series module is a direct competitor to dedicated time series databases, but if the only query is based on ordering, it's unnecessary complexity. Hence, it is recommended to use a SORTED SET with a score of 0 for every value (the values are appended), or use a timestamp for the score for simple time-based queries.

References

Redis Launchpad
- + \ No newline at end of file diff --git a/howtos/caching/index.html b/howtos/caching/index.html index 381f8a111a..3c42b0eb3b 100644 --- a/howtos/caching/index.html +++ b/howtos/caching/index.html @@ -4,7 +4,7 @@ How to cache REST API responses Using Redis & NodeJS | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to cache REST API responses Using Redis & NodeJS


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

My Image

This app returns the number of repositories a Github account has. When you first search for an account, the server calls Github's API to return the response. This can take some time. The server then adds the details of this slow response to Redis for future requests. When you search again, the next response comes directly from Redis cache instead of calling Github. The responses become much faster.
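
In redis-cli terms, the cache-aside flow looks roughly like this (key name and TTL are illustrative):

 GET github:repos:alice         # (nil) on the first request: call GitHub, then...
 SET github:repos:alice 42 EX 3600
 GET github:repos:alice         # "42" served straight from the cache afterwards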

Pre-requisite

1. Install NodeJS

brew install node

2. Clone the repository

git clone https://github.com/redis-developer/basic-caching-demo-nodejs

3. Copy .env.sample to create .env

- REDIS_ENDPOINT_URI: Redis server URI
- REDIS_PASSWORD: Password to the server

4. Run frontend

cd client
yarn
yarn serve

5. Run backend

yarn
yarn start

Open up http://localhost:8081 and you can see a basic caching demo application up and running.

My Image

- + \ No newline at end of file diff --git a/howtos/chatapp/index.html b/howtos/chatapp/index.html index 300563c9e8..ec9258bf0f 100644 --- a/howtos/chatapp/index.html +++ b/howtos/chatapp/index.html @@ -4,7 +4,7 @@ How to build a Chat application using Redis | The Home of Redis Developers - + @@ -17,7 +17,7 @@ User data is stored in a hash set where each user entry contains the next values:

  • username: unique user name;

  • password: hashed password

  • Additionally, a set of chat rooms is associated with each user

  • Rooms are sorted sets which contains messages where score is the timestamp for each message

  • Each chat room has a name associated with it

  • The "online" set is global for all users is used for keeping track on which user is online.

  • Each user's hash is accessed by the key user:{userId}. The data for it is stored with HSET key field data. The user ID is calculated by incrementing the total_users key (INCR total_users)

  • Usernames are stored as separate keys (username:{username}) that return the userId for quicker access, stored with SET username:{username} {userId}.

  • Rooms which a user belongs to are stored at user:{userId}:rooms as a set of chat room ids. A room is added by SADD user:{userId}:rooms {roomId} command.

  • Messages are stored at room:{roomId} key in a sorted set (as mentioned above). They are added with the ZADD room:{roomId} {timestamp} {message} command. Messages are serialized to an app-specific JSON string.
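
Putting those write paths together, a sketch of creating a user and posting a message (IDs and the timestamp are illustrative):

 INCR total_users                                   # -> 3, the new user's id
 HSET user:3 username "alice" password "<hash>"
 SET username:alice 3
 SADD user:3:rooms 1
 ZADD room:1 1615569194 '{"from":3,"message":"Hello!"}'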

How the data is accessed?

Get User HGETALL user:{id}.

 HGETALL user:2

where we get data for the user with id: 2.

  • Online users: SMEMBERS online_users. This will return ids of users which are online

  • Get room ids of a user: SMEMBERS user:{id}:rooms. Example:

 SMEMBERS user:2:rooms

This will return IDs of chat rooms for user with ID: 2

  • Get list of messages ZREVRANGE room:{roomId} {offset_start} {offset_end}. Example:
 ZREVRANGE room:1:2 0 50

It will return the 50 most recent messages, starting at offset 0, for the private room between users with IDs 1 and 2.

- + \ No newline at end of file diff --git a/howtos/frauddetection/index.html b/howtos/frauddetection/index.html index a60e5b7261..944277c061 100644 --- a/howtos/frauddetection/index.html +++ b/howtos/frauddetection/index.html @@ -4,7 +4,7 @@ How to build a Fraud Detection System using Redis | The Home of Redis Developers - + @@ -16,7 +16,7 @@ Using zcount, we find the number of clicks from a device in a certain pre configured window. If the count received is greater than a certain threshold, we identify it as anomalous.

Finally, data is pushed to a Redis Stream using the XADD command. id='*' tells Redis Streams to generate a unique ID for our message.

Registering Gears:

When the app starts, a gear is registered that reacts to the stream we use to push data.

Gist: https://gist.github.com/Sachin-Kottarathodi/f9dac7a3342a3643e792e2143a6adf7d

 import json

import redis
from gearsclient import GearsRemoteBuilder as GearsBuilder
from redistimeseries.client import Client

# Connection used to register the gear (assumed here; the original app creates it in setup).
redis_conn = redis.Redis()

def stream_handler(item):
    data = item['value']
    member = json.dumps(
        {'device_id': data['device_id'],
         'transaction_id': data['transaction_id'],
         'ts': data['ts'],
         })
    # Record the click in a per-device sorted set (later used by ZCOUNT for click-spam checks).
    redis.Redis().zadd(data.get('device_id'), {member: data['ts']})
    # Increment the time series counter for the clean/fraud type (used for visualization).
    Client().incrby(data['fraud_type'], 1)

GearsBuilder(reader='StreamReader', r=redis_conn,
             requirements=["redis", "redistimeseries"]).foreach(stream_handler).register('data_stream')

As mentioned before, since RedisGears and Redis Time Series are modules, we need to use the clients provided in their respective packages.

We use the GearsRemoteBuilder class to build the Gear. StreamReader ensures that the stream_handler function is executed for every new message from the stream. The stream_handler adds the data to the sorted set using zadd (This information is used in zcount to identify click_spam) and increments the count of time series for clean and fraud types using incrby of the Redis Time Series module, which is later used for visualization.

Fraud Detection

Gear registration can be checked on RedisInsight as well.

Finally, we incorporate the flask app which exposes the end point for trigger.

Gist: https://gist.github.com/Sachin-Kottarathodi/2a6cccb29b4a9fdc7d58086af07aa6eb

 from flask import Flask, request
from fraud_checks import FraudChecks
from setup import Setup

app = Flask(__name__)


@app.route('/', methods=['POST'])
def check_fraud():
    try:
        response = FraudChecks().check_fraud(request.get_json())
        code = 200
    except Exception as e:
        print("Error occurred ", e)
        response = str(e)
        code = 500

    return response, code


if __name__ == '__main__':
    Setup().init()
    app.run(port=5000, debug=False, host='0.0.0.0')

Here, the app is exposed on port 5000. Before starting the server, our setup's init method is called to register the gear. The endpoint calls the function that performs the fraud checks and returns the response.

The application is written in python and exposes an endpoint which accepts a few parameters. Use the below command to invoke the application:

 $ curl --request POST 'localhost:5000' --header 'Content-Type: application/json' --data-raw '{
"device_id": "111-000-000",
"ip": "1.1.1.1",
"transaction_id": "3e4fad5fs"}'
clean

Since initially no data is available in the Cuckoo Filter, all IPs will be allowed through. To add data to the Cuckoo Filter, connect to Redis using redis-cli and run the command:

 cf.addnx ip_cf 1.1.1.1

Run the post command with this IP again. This time, the result will be ip_blacklist.

Fraud Detection

Click Spamming:

The app is configured to allow two events in a window of 10 seconds from the same device. To verify, make more than two curl requests within 10 seconds and the result will be click_spam.

Fraud Detection

Optional: The following variables can be configured during the ‘docker run’ command. -e CLICK_SPAM_THRESHOLD=3 -e CLICK_SPAM_WINDOW_IN_SEC=10

Step #6: Deploy Grafana

It’s exciting to see the fraud detection plotted in Grafana. To implement this, run the command below:

 $ docker run -d -e "GF_INSTALL_PLUGINS=redis-app" -p 3000:3000 grafana/grafana

Point your browser to http://<IP_ADDRESS>:3000.

Fraud detection

Log in as ‘admin’ with password ‘admin’; you can reset the password after your first login.

Fraud detection

Click on the gear icon on the left panel (Configuration) and choose Data Sources.

Fraud detection

Choose ‘Add data source’.

Fraud detection

Search for Redis and choose Redis Data Source.

Fraud detection

Copy and paste the raw json content from here in the ‘Import via panel json’ box. Click on Load.

Fraud detection

This creates a dashboard ‘Fraud Stats’. If you get an error while importing the dashboard, try changing the name and UUID of the dashboard.

Fraud detection

Fraud detection

Conclusion & future work

  • If we consider the entire flow of fraud detection, from event streaming to data processing to visualization (using insights), all of this would normally require multiple components and extensive orchestration. With the Redis ecosystem, most of this is removed.
  • This is just the beginning of more checks that can be done on events. A lot of other checks can be done using modules and data structures. For example; Redis provides geospatial data structures built over sorted sets. Since latitude and longitude can be derived from IP using IP to location conversion providers, a lot of insight can be derived on whether the event can be fraudulent or not.
  • To reject servicing requests altogether, the redis-cell module to rate limit requests against a key can be used.
- + \ No newline at end of file diff --git a/howtos/hackernews/index.html b/howtos/hackernews/index.html index 85603c1acf..abf609f67a 100644 --- a/howtos/hackernews/index.html +++ b/howtos/hackernews/index.html @@ -4,7 +4,7 @@ How to build a HackerNews Clone using Redis | The Home of Redis Developers - + @@ -15,7 +15,7 @@ It developed as a project of Graham's company Y Combinator, functioning as a real-world application of the Arc . programming language which Graham co-developed.

This is a HackerNews clone built upon React, NextJS as a frontend and NodeJS, ExpressJS & Redis as a backend. This application uses JSON for storing the data and Search in Redis Stack for searching.

hackernews

Step 1. Install the prerequisites

Install the below packages

  • NPM v7.8.0
  • NODE v15.10.0

Step 2. Create Redis Cloud database

Redis is an open source, in-memory, key-value data store most commonly used as a primary database, cache, message broker, and queue. Redis is popular among the developers as it delivers sub-millisecond response times, enabling fast and powerful real-time applications in industries such as gaming, fintech, ad-tech, social media, healthcare, and IoT.

Redis Cloud is a fully-managed cloud service for hosting and running your Redis dataset in a highly-available and scalable manner, with predictable and stable top performance. Redis Enterprise Cloud allows you to run a Redis server in the cloud and access the instance in multiple ways: RedisInsight, the Redis command line, and client tools. You can quickly and easily get your apps up and running with Redis Cloud through its Redis Heroku addons: just tell us how much memory you need and get started instantly with your first Redis database. You can then add more Redis databases (each running in a dedicated process, in a non-blocking manner) and increase or decrease the memory size of your plan without affecting your existing data.

Follow this link to create a Redis Cloud account with 2 databases with Redis Stack.

Save the database endpoint URL and password for future reference.

Step 3. Clone the repository

 git clone https://github.com/redis-developer/redis-hacker-news-demo
cd redis-hacker-news-demo

Step 4. Setting up environment variables

Copy .env.sample to .env and provide the values as shown below:

 MAILGUN_API_KEY=YOUR_VALUE_HERE
SEARCH_REDIS_SERVER_URL=redis://redis-XXXXX.c10.us-east-1-2.ec2.cloud.redislabs.com:10292
SEARCH_REDIS_PASSWORD=ABCDXYZbPXHWsC
JSON_REDIS_SERVER_URL=redis://redis-XXXXX.c14.us-east-1-2.ec2.cloud.redislabs.com:14054
JSON_REDIS_PASSWORD=ABCDXYZA3tzw2XYMPi2P8UPm19D
LOG_LEVEL=1
USE_REDIS=1
REDIS_REINDEX=
PRODUCTION_WEBSITE_URL=i

Step 5. Run the developer environment

 npm install
npm run dev

Step 6. Pull Hacker News API to seed database

Using the Hacker News API, it pulls the latest Hacker News data. Next, you need to seed top stories from Hacker News. First, create a moderator with the credentials moderator:password123, then run:

 node ./backend/scripts/seed.js

Step 7. Access the HackerNews URL

Open http://localhost:3001 and you should be able to access the HackerNews login screen as shown below:

hackernews

How it works

By Screens

Signup

Signup Screen

  • Make sure the user (where username is andy1) does not exist.
 FT.SEARCH idx:user @username:"andy1" NOCONTENT LIMIT 0 1 SORTBY _id DESC
  • Get and increase the next id in users collection.
 GET user:id-indicator // 63
INCR user:id-indicator // 64 will be next user id, 63 is current user id
  • Create the user:63 hash and JSON record (the JSON also holds the authToken, password hash, etc.)
  HSET user:63 username andy1 email  created 1615569194 karma 0 about  showDead false isModerator false shadowBanned false banned false _id 63
  JSON.SET user:63 .
 '{"username":"andy1","password":"$2a$10$zy8tsCske8MfmDX5CcWMce5S1U7PJbPI7CfaqQ7Bo1PORDeqJxqhe","authToken":"AAV07FIwTiEkNrPj0x1yj6BPJQSGIPzV0sICw2u0","  authTokenExpiration":1647105194,"email":"","created":1615569194,"karma":0,"showDead":false,"isModerator":false,"shadowBanned":false,"banned":false,"_id":63}'

Login

Login Screen

  • Find user
 FT.SEARCH idx:user  @username:"andy1" NOCONTENT LIMIT 0 1 SORTBY _id DESC
  • Make sure password is correct
 JSON.MGET user:63 .
  • Compare the password with the stored password hash and create a cookie if it's successful

Item list page

Newest Screen

  • Check if user has toggled hidden attribute on a specific item.
 FT.SEARCH idx:user-hidden  @username:"andy1" NOCONTENT LIMIT 0 10000 SORTBY _id DESC
// Result - [0, "item:4"]
  • If that is not null
 FT.SEARCH idx:item  (-(@id:"item:4")) (@dead:"false") NOCONTENT LIMIT 0 30 SORTBY _id ASC
  • If it's empty array
 FT.SEARCH idx:item (@dead:"false") NOCONTENT LIMIT 0 30 SORTBY _id ASC
// Result - [3,"item:1","item:2","item:3"]
  • Get all items from Redis using JSON.MGET
 JSON.MGET item:1 item:2 item:3 .
// Result - [{"id":"bkWCjcyJu5WT","by":"todsacerdoti","title":"Total Cookie
Protection","type":"news","url":"https://blog.mozilla.org/security/2021/02/23/total-cookie-
protection/","domain":"mozilla.org","points":1,"score":1514,"commentCount":0,"created":1614089461,"dead":false,"_id":3}]]
  • Get items posted within last 1 week
 FT.SEARCH idx:item  (@created:[(1615652598 +inf]) (@dead:"false") NOCONTENT LIMIT 0 0 SORTBY _id DESC
// Result - [13,"item:19","item:17","item:16","item:15","item:14","item:13","item:12","item:11","item:8","item:5","item:4","item:3","item:1"]
note

In this case, 1615652598 is the timestamp of 1 week earlier than the current timestamp

 JSON.MGET item:19 item:17 item:16 item:15 item:14 item:13 item:12 item:11 item:8 item:5 item:4 item:3 item:1 .
// Result - the JSON of selected items

Item Detail

Item Detail Screen

  • Get the item object first
 JSON.MGET item:1 .
  • Find item:1 's root comments
 FT.SEARCH idx:comment  (@parentItemId:"kDiN0RhTivmJ") (@isParent:"true") (@dead:"false") NOCONTENT LIMIT 0 30 SORTBY points ASC
// Result - [3,"comment:1","comment:2","comment:12"]
  • Get those comments
 JSON.MGET comment:1 comment:2 comment:12 .
// one comment example result - {"id":"jnGWS8TTOecC","by":"ploxiln","parentItemId":"kDiN0RhTivmJ","parentItemTitle":"The Framework
Laptop","isParent":true,"parentCommentId":"","children":[13,17,20],"text":"I don&#x27;t see any mention of the firmware and drivers efforts for this.
Firmware and drivers always end up more difficult to deal with than expected.<p>The Fairphone company was surprised by difficulties upgrading and
patching android without support from their BSP vendor, causing many months delays of updates _and_ years shorter support life than they were
planning for their earlier models.<p>I purchased the Purism Librem 13 laptop from their kickstarter, and they had great plans for firmware and
drivers, but also great difficulty following through. The trackpad chosen for the first models took much longer than expected to get upstream linux
support, and it was never great (it turned out to be impossible to reliably detect their variant automatically). They finally hired someone with
sufficient skill to do the coreboot port _months_ after initial units were delivered, and delivered polished coreboot firmware for their initial
laptops _years_ after they started the kickstarter.<p>So, why should we have confidence in the firmware and drivers that Framework will deliver
:)","points":1,"created":1614274058,"dead":false,"_id":12}
  • Using children of each comment, fetch children comments
 FT.SEARCH idx:comment  (@dead:"false") (@_id:("3"|"7"|"11")) NOCONTENT LIMIT 0 10000 SORTBY _id DESC
  • Iterate this over until all comments are resolved

Submit

Submit Screen

  • Get next item's id and increase it
 GET item:id-indicator
// Result - 4
SET item:id-indicator 5
  • Create hash and index
 HSET item:4 id iBi8sU4HRcZ2 by andy1 title Firebase trends type ask url  domain  text Firebase Performance Monitoring is a service that helps you to
gain insight into the performance characteristics of your iOS, Android, and web apps. points 1 score 0 created 1615571392 dead false _id 4
 JSON.SET item:4 . '{"id":"iBi8sU4HRcZ2","by":"andy1","title":"Firebase trends","type":"ask","url":"","domain":"","text":"Firebase Performance
Monitoring is a service that helps you to gain insight into the performance characteristics of your iOS, Android, and web
apps.","points":1,"score":0,"commentCount":0,"created":1615571392,"dead":false,"_id":4}'

Update Profile

Update Profile Screen

  • Get the user
 FT.SEARCH idx:user  (@username:"andy1") NOCONTENT LIMIT 0 1 SORTBY _id DESC
 JSON.MGET user:63 .
  • Update the user
 HSET user:63 username "andy1" email "" created 1615569194 karma 1 about "I am a software engineer." showDead false isModerator false shadowBanned false banned false _id 63
 JSON.SET user:63 .
'{"username":"andy1","password":"$2a$10$zy8tsCske8MfmDX5CcWMce5S1U7PJbPI7CfaqQ7Bo1PORDeqJxqhe","authToken":"KJwPLN1idyQrMp5qEY5hR3VhoPFTKRcC8Npxxoju"," authTokenExpiration":1647106257,"email":"","created":1615569194,"karma":1,"about":"I am a software
engineer.","showDead":false,"isModerator":false,"shadowBanned":false,"banned":false,"_id":63}'

Moderation Logs screen

Moderation Logs

  • Find all moderation logs
 FT.SEARCH idx:moderation-log * NOCONTENT LIMIT 0 0 SORTBY _id DESC
// Result - [1,"moderation-log:1"]
  • Get that moderation log
 JSON.MGET moderation-log:1 .

Search Screen

  • Get items whose title contains "fa"
 FT.SEARCH idx:item  (@title:fa*) (-(@id:"aaaaaaaaa")) (@dead:"false") NOCONTENT LIMIT 0 30 SORTBY score ASC
// Result - [2,"item:18","item:16"]
  • Get those items via json
 JSON.MGET item:18 item:16 .

Example commands

There are two types of fields: indexed and non-indexed.

  1. Indexed fields are stored in a hash using HSET/HGET.
  2. Non-indexed fields are stored in JSON.
  • Create an index

When the schema is created, the index should be created as well.

 FT.CREATE idx:user ON hash PREFIX 1 "user:" SCHEMA username TEXT SORTABLE email TEXT SORTABLE karma NUMERIC SORTABLE
  • Drop search index

Drop or update the index if the schema has changed.

 FT.DROPINDEX idx:user
  • Get search info

Validate that the fields are indexed properly. If not, update the index fields or drop and recreate the index.

 FT.INFO idx:user
  • Create a new user

This requires a new hash and a new JSON record.

 HSET user:andy username "andy" email "andy@gmail.com" karma 0
 JSON.SET user:andy '{"passoword": "hashed_password", "settings": "{ \"showDead\": true }" }'
  • Update a user
 HSET user:1 username "newusername"
 JSON.SET user:andy .username '"newusername"'
  • Find user with username 'andy'
  1. Find the user's hash first
 FT.SEARCH idx:user '@username:"andy"'
  2. Then fetch the related JSON object
 JSON.GET user:andy
  • Find user whose id is andy1 or andy2
 FT.SEARCH idx:user '@id:("andy1"|"andy2")'
  • Find user whose id is not andy1 or andy2
 FT.SEARCH idx:user '(-(@id:("andy1"|"andy2")))'
  • Find user whose id is andy1 or username is andy
 FT.SEARCH idx:user '(@id:"andy1") | (@username:"andy")'
  • Find user whose id is andy1 and username is andy
 FT.SEARCH idx:user '(@id:"andy1") (@username:"andy")'
  • Find first 10 users order by username
 FT.SEARCH idx:user '*' LIMIT 0 10 SORTBY username ASC
  • Find the next 10 users (LIMIT takes an offset and a count)
 FT.SEARCH idx:user '*' LIMIT 10 10 SORTBY username ASC
  • Get JSON from multiple keys
 JSON.MGET user:andy1 user:andy2 user:andy3 .

References

- + \ No newline at end of file diff --git a/howtos/herokujava/index.html b/howtos/herokujava/index.html index ff851668c4..19890576a3 100644 --- a/howtos/herokujava/index.html +++ b/howtos/herokujava/index.html @@ -4,7 +4,7 @@ How to build a Java based application on Heroku using Redis | The Home of Redis Developers - + @@ -14,7 +14,7 @@

How to build a Java based application on Heroku using Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Step 1. Create Redis Cloud

Create your free Redis Cloud account. Follow this link to create a Redis Cloud subscription and database as shown below:

heroku

Save the database endpoint URL and password for future reference.

Step 2. Create a Heroku account

If you are using Heroku for the first time, create your new Heroku account through this link

heroku

Step 3. Install Heroku CLI on your system

 brew install heroku

Step 4. Login to Heroku

 heroku login
heroku: Press any key to open up the browser to login or q to exit:
Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
Logging in... done
Logged in as your_email_address

Step 5. Connect your application to Redis Cloud

For this demonstration, we will be using a Sample Rate Limiting application

Clone the repository

 git clone https://github.com/redis-developer/basic-rate-limiting-demo-java

Then create a Heroku app; this also adds a git remote to the cloned repository:

heroku create
Creating app... done, ⬢ hidden-woodland-03996
https://hidden-woodland-03996.herokuapp.com/ | https://git.heroku.com/hidden-woodland-03996.git

Step 6. Setting up environment variables

Go to the Heroku dashboard, click "Settings" and set REDIS_ENDPOINT_URI and REDIS_PASSWORD under Config Vars, using the values you saved in Step 1.

heroku
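
Alternatively, the same config vars can be set from the Heroku CLI. The values below are placeholders; use the endpoint and password you saved in Step 1:

 heroku config:set REDIS_ENDPOINT_URI="<your-database-endpoint>" REDIS_PASSWORD="<your-password>"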

You now have a functioning Git repository that contains a simple application ready to be deployed.

Step 7. Deploy your code

Heroku generates a random name (in this case hidden-woodland-03996) for your app, or you can pass a parameter to specify your own app name. Now deploy your code:

$ git push heroku
remote: BUILD SUCCESSFUL in 1m 5s
remote: 12 actionable tasks: 12 executed
remote: -----> Discovering process types
remote: Procfile declares types -> web
remote:
remote: -----> Compressing...
remote: Done: 298.9M
remote: -----> Launching...
remote: Released v3
remote: https://hidden-woodland-03996.herokuapp.com/ deployed to Heroku
remote:
remote: Verifying deploy... done.
To https://git.heroku.com/hidden-woodland-03996.git
* [new branch] master -> master

Step 8. Accessing the application

Open https://hidden-woodland-03996.herokuapp.com/ to see your application

heroku

- + \ No newline at end of file diff --git a/howtos/herokunodejs/index.html b/howtos/herokunodejs/index.html index 0e876cfec6..853acc6f53 100644 --- a/howtos/herokunodejs/index.html +++ b/howtos/herokunodejs/index.html @@ -4,7 +4,7 @@ How to build a NodeJS based application on Heroku using Redis | The Home of Redis Developers - + @@ -13,7 +13,7 @@

How to build a NodeJS based application on Heroku using Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Step 1. Create Redis Cloud

Create your free Redis Cloud account. Follow this link to create a Redis Cloud subscription and database as shown below:

heroku

Save the database endpoint URL and password for future reference.

Step 2. Create a Heroku account

If you are using Heroku for the first time, create your new Heroku account through this link

heroku

Step 3. Install Heroku CLI on your system

 brew install heroku

Step 4. Login to Heroku

 heroku login
heroku: Press any key to open up the browser to login or q to exit:
Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
Logging in... done
Logged in as your_email_address

Step 5. Connect your application to Redis Cloud

For this demonstration, we will be using a Sample Rate Limiting application

Clone the repository

 git clone https://github.com/redis-developer/basic-redis-rate-limiting-demo-nodejs

Run the command below to create a Heroku app and attach it as a git remote to the cloned repository:

heroku create
Creating app... done, ⬢ rocky-lowlands-06306
https://rocky-lowlands-06306.herokuapp.com/ | https://git.heroku.com/rocky-lowlands-06306.git

Step 6. Setting up environment variables

Go to the Heroku dashboard, click "Settings" and set REDIS_ENDPOINT_URI and REDIS_PASSWORD under Config Vars, using the values you saved in Step 1.

heroku

You now have a functioning Git repository that contains a simple application as well as a package.json file, which is used by Node’s dependency manager.

Step 7. Deploy your code

$ git push heroku

Wait a few seconds and you will see messages like the ones below:

remote: -----> Launching...
remote: Released v3
remote: https://rocky-lowlands-06306.herokuapp.com/ deployed to Heroku
remote:
remote: Verifying deploy... done.
To https://git.heroku.com/rocky-lowlands-06306.git
* [new branch] main -> main
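
If the build fails or the app misbehaves, you can stream the application logs from the project directory using the standard Heroku CLI command:

 heroku logs --tail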

Step 8. Accessing the application

Open https://rocky-lowlands-06306.herokuapp.com/ to see your application

heroku

- + \ No newline at end of file diff --git a/howtos/herokupython/index.html b/howtos/herokupython/index.html index e3ea208c11..aed844aafa 100644 --- a/howtos/herokupython/index.html +++ b/howtos/herokupython/index.html @@ -4,7 +4,7 @@ How to build a Python based application on Heroku using Redis | The Home of Redis Developers - + @@ -14,7 +14,7 @@

How to build a Python based application on Heroku using Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Step 1. Create Redis Cloud

Create your free Redis Cloud account. Follow this link to create a Redis Cloud subscription and database as shown below:

heroku

Save the database endpoint URL and password for future reference.

Step 2. Create a Heroku account

If you are using Heroku for the first time, create your new Heroku account through this link

heroku

Step 3. Install Heroku CLI on your system

 brew install heroku

Step 4. Login to Heroku

 heroku login
heroku: Press any key to open up the browser to login or q to exit:
Opening browser to https://cli-auth.heroku.com/auth/cli/browser/XXXXXXXXXXA
Logging in... done
Logged in as your_email_address

Step 5. Connect your application to Redis Cloud

For this demonstration, we will be using a Sample Rate Limiting application

Clone the repository

 git clone https://github.com/redis-developer/basic-rate-limiting-demo-python

Run the command below to create a Heroku app and attach it as a git remote to the cloned repository:

$ heroku create
Creating app... done, ⬢ fast-reef-76278
https://fast-reef-76278.herokuapp.com/ | https://git.heroku.com/fast-reef-76278.git

Step 6. Setting up environment variables

Go to the Heroku dashboard, click "Settings" and set REDIS_ENDPOINT_URI and REDIS_PASSWORD under Config Vars, using the values you saved in Step 1.

heroku

Step 7. Deploy your code

Heroku generates a random name (in this case fast-reef-76278) for your app, or you can pass a parameter to specify your own app name. Now deploy your code:

$ git push heroku
Enumerating objects: 512, done.
Counting objects: 100% (512/512), done.
Delta compression using up to 12 threads
Compressing objects: 100% (256/256), done.
Writing objects: 100% (512/512), 1.52 MiB | 660.00 KiB/s, done.
Total 512 (delta 244), reused 512 (delta 244)
remote: Compressing source files... done.
remote: Building source:
remote:
remote: -----> Building on the Heroku-20 stack
remote: -----> Determining which buildpack to use for this app
remote: -----> Python app detected


remote: -----> Compressing...
remote: Done: 59.3M
remote: -----> Launching...
remote: Released v5
remote: https://fast-reef-76278.herokuapp.com/ deployed to Heroku
remote:
remote: Verifying deploy... done.
To https://git.heroku.com/fast-reef-76278.git
* [new branch] master -> master

Step 8. Accessing the application

Open https://fast-reef-76278.herokuapp.com/ to see your application
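
Alternatively, running the standard heroku open command from the project directory launches the deployed app in your default browser:

 heroku open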

heroku

- + \ No newline at end of file diff --git a/howtos/index-modules/index.html b/howtos/index-modules/index.html index a21d8b8f36..b20d989eaf 100644 --- a/howtos/index-modules/index.html +++ b/howtos/index-modules/index.html @@ -4,7 +4,7 @@ index-modules | The Home of Redis Developers - + @@ -18,7 +18,7 @@ hide_table_of_contents: true slug: /modules/ custom_edit_url:


~

- + \ No newline at end of file diff --git a/howtos/leaderboard/index.html b/howtos/leaderboard/index.html index 86cc86aee7..f615976836 100644 --- a/howtos/leaderboard/index.html +++ b/howtos/leaderboard/index.html @@ -4,7 +4,7 @@ How to build a Real-Time Leaderboard app Using Redis | The Home of Redis Developers - + @@ -14,7 +14,7 @@

How to build a Real-Time Leaderboard app Using Redis


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

The concept of a leaderboard—a scoreboard showing the ranked names and current scores (or other data points) of the leading competitors—is essential to the world of computer gaming, but leaderboards are now about more than just games. They are about gamification, a broader implementation that can include any group of people with a common goal (coworkers, students, sales groups, fitness groups, volunteers, and so on).

Leaderboards can encourage healthy competition in a group by openly displaying the current ranking of each group member. They also provide a clear way to view the ongoing achievements of the entire team as members move towards a goal. Gamification of tasks and goals via leaderboards is a great way to motivate people by providing them with constant feedback of where they rank in comparison to other group members. Done well, this can lead to healthy competition that builds group cohesion.

My Image

Step 1. Install the below software

Step 2. Clone the repository

git clone https://github.com/redis-developer/basic-redis-leaderboard-demo-java

Step 3. Run docker compose

docker network create global
docker-compose up -d --build

Step 4. Verify that the containers are up and running

 docker-compose ps
Name Command State Ports
--------------------------------------------------------------------------------------------------
redis.redisleaderboard.docker docker-entrypoint.sh redis ... Up 127.0.0.1:55000->6379/tcp

Step 5. Copy .env.example to create .env

Provide the values for environment variables (if needed)

- REDIS_URL: Redis database endpoint URL
- REDIS_HOST: Redis server host
- REDIS_PORT: Redis server port
- REDIS_DB: Redis server db index
- REDIS_PASSWORD: Redis server password

If you're using Redis Enterprise Cloud, you must supply the database endpoint, password, port, and database name. For a local system, the entries look like the ones shown below:

REDIS_URL=
REDIS_HOST=redis://localhost
REDIS_PORT=6379
REDIS_PASSWORD=
REDIS_DB=

Step 6. Run the backend

  • Install gradle

Follow this link for macOS installation instructions: https://gradle.org/install/

brew install gradle
  • Install JDK

Follow this link for macOS installation instructions: https://docs.oracle.com/javase/10/install/installation-jdk-and-jre-macos.htm

Then load the environment variables defined in .env:

export $(cat .env | xargs)

Step 7. Run the wrapper task

To use the Gradle Wrapper, we need to generate a few specific files. We'll generate them using the built-in Gradle task called wrapper. Note that these files only need to be generated once.

Now, let's run the wrapper task in our project directory:

gradle wrapper

It should show the below results:

Welcome to Gradle 6.8.3!

Here are the highlights of this release:
- Faster Kotlin DSL script compilation
- Vendor selection for Java toolchains
- Convenient execution of tasks in composite builds
- Consistent dependency resolution

For more details see https://docs.gradle.org/6.8.3/release-notes.html

Starting a Gradle Daemon (subsequent builds will be faster)

BUILD SUCCESSFUL in 29s
1 actionable task: 1 executed

Step 8. Perform the build task

The Gradle Wrapper is now available for building your project. It's time to run the wrapper script to perform the build task.

./gradlew build
% ./gradlew build
Downloading https://services.gradle.org/distributions/gradle-6.8.3-bin.zip
..........10%..........20%..........30%...........40%..........50%..........60%..........70%...........80%..........90%..........100%
Starting a Gradle Daemon, 1 incompatible Daemon could not be reused, use --status for details

> Task :test
2021-03-01 07:08:42.962 INFO 3624 --- [extShutdownHook] o.s.s.concurrent.ThreadPoolTaskExecutor : Shutting down ExecutorService 'applicationTaskExecutor'

BUILD SUCCESSFUL in 1m 13s
12 actionable tasks: 12 executed

Step 9. Run your application

./gradlew run
> Task :run

. ____ _ __ _ _
/\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
\\/ ___)| |_)| | | | | || (_| | ) ) ) )
' |____| .__|_| |_|_| |_\__, | / / / /
=========|_|==============|___/=/_/_/_/
:: Spring Boot :: (v2.4.1)

2021-03-01 07:09:59.610 INFO 3672 --- [ restartedMain] BasicRedisLeaderLoardDemoJavaApplication : Starting BasicRedisLeaderLoardDemoJavaApplication using Java 13.0.2 on Ajeets-MacBook-Pro.local with PID 3672 (/Users/ajeetraina/projects/basic-redis-leaderboard-demo-java/build/classes/java/main started by ajeetraina in /Users/ajeetraina/projects/basic-redis-leaderboard-demo-java)
2021-03-01 07:09:59.614 INFO 3672 --- [ restartedMain] BasicRedisLeaderLoardDemoJavaApplication : No active profile set, falling back to default profiles: default
2021-03-01 07:09:59.661 INFO 3672 --- [ restartedMain] .e.DevToolsPropertyDefaultsPostProcessor : Devtools property defaults active! Set 'spring.devtools.add-properties' to 'false' to disable
2021-03-01 07:09:59.661 INFO 3672 --- [ restartedMain] .e.DevToolsPropertyDefaultsPostProcessor : For additional web related logging consider setting the 'logging.level.web' property to 'DEBUG'
2021-03-01 07:10:00.481 INFO 3672 --- [ restartedMain] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 5000 (http)
2021-03-01 07:10:00.492 INFO 3672 --- [ restartedMain] o.apache.catalina.core.StandardService : Starting service [Tomcat]
2021-03-01 07:10:00.492 INFO 3672 --- [ restartedMain] org.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/9.0.41]
2021-03-01 07:10:00.551 INFO 3672 --- [ restartedMain] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext
2021-03-01 07:10:00.551 INFO 3672 --- [ restartedMain] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 889 ms
2021-03-01 07:10:00.756 INFO 3672 --- [ restartedMain] o.s.s.concurrent.ThreadPoolTaskExecutor : Initializing ExecutorService 'applicationTaskExecutor'
2021-03-01 07:10:00.845 INFO 3672 --- [ restartedMain] o.s.b.a.w.s.WelcomePageHandlerMapping : Adding welcome page: URL [file:/Users/ajeetraina/projects/basic-redis-leaderboard-demo-java/assets/index.html]
2021-03-01 07:10:00.949 INFO 3672 --- [ restartedMain] .s.s.UserDetailsServiceAutoConfiguration :

Using generated security password: ea2d5326-b04c-4f93-b771-57bcb53f656e

2021-03-01 07:10:01.016 INFO 3672 --- [ restartedMain] o.s.s.web.DefaultSecurityFilterChain : Will secure any request with [org.springframework.security.web.context.request.async.WebAsyncManagerIntegrationFilter@583fa06c, org.springframework.security.web.context.SecurityContextPersistenceFilter@524c0386, org.springframework.security.web.header.HeaderWriterFilter@c6e5d4e, org.springframework.security.web.authentication.logout.LogoutFilter@3e1f33e9, org.springframework.security.web.savedrequest.RequestCacheAwareFilter@6790427f, org.springframework.security.web.servletapi.SecurityContextHolderAwareRequestFilter@40ddf86, org.springframework.security.web.authentication.AnonymousAuthenticationFilter@1412ffa9, org.springframework.security.web.session.SessionManagementFilter@3eb6c20f, org.springframework.security.web.access.ExceptionTranslationFilter@21646e94, org.springframework.security.web.access.intercept.FilterSecurityInterceptor@649e1b25]
2021-03-01 07:10:01.043 INFO 3672 --- [ restartedMain] o.s.b.d.a.OptionalLiveReloadServer : LiveReload server is running on port 35729
2021-03-01 07:10:01.065 INFO 3672 --- [ restartedMain] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 5000 (http) with context path ''
2021-03-01 07:10:01.093 INFO 3672 --- [ restartedMain] BasicRedisLeaderLoardDemoJavaApplication : Started BasicRedisLeaderLoardDemoJavaApplication in 1.937 seconds (JVM running for 2.327)
<=========----> 75% EXECUTING [17s]
> :run

Step 10. Access the leaderboard application

My Image

How it works

How the data is stored:

  • AAPL's details - a market cap of 2.6 trillion and USA origin - are stored in a hash as shown below:

     HSET "company:AAPL" symbol "AAPL" market_cap "2600000000000" country USA
  • AAPL's rank is maintained in a sorted set (ZSET), scored by its 2.6 trillion market cap:

     ZADD  companyLeaderboard 2600000000000 company:AAPL

How the data is accessed:

  • Top 10 companies:

     ZREVRANGE companyLeaderboard 0 9 WITHSCORES
  • All companies:

     ZREVRANGE companyLeaderboard 0 -1 WITHSCORES
  • Bottom 10 companies:

     ZRANGE companyLeaderboard 0 9 WITHSCORES
  • Between rank 10 and 15:

     ZREVRANGE companyLeaderboard 9 14 WITHSCORES
  • Show the ranks of AAPL, FB and TSLA (ZREVRANGE takes index ranges, so use ZREVRANK per member):

     ZREVRANK companyLeaderboard company:AAPL
     ZREVRANK companyLeaderboard company:FB
     ZREVRANK companyLeaderboard company:TSLA
  • Adding 1 billion to the market cap of FB:

     ZINCRBY companyLeaderboard 1000000000 "company:FB"
  • Reducing the market cap of FB by 1 billion:

     ZINCRBY companyLeaderboard -1000000000 "company:FB"
  • Companies between 500 billion and 1 trillion:

     ZCOUNT companyLeaderboard 500000000000 1000000000000
  • Companies over a trillion:

     ZCOUNT companyLeaderboard 1000000000000 +inf
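  • Market cap of a single company, for example AAPL (ZSCORE returns the member's score):

     ZSCORE companyLeaderboard company:AAPL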

References

- + \ No newline at end of file diff --git a/howtos/moviesdatabase/advancedoption/index.html b/howtos/moviesdatabase/advancedoption/index.html index 9a7408f292..c534a174f8 100644 --- a/howtos/moviesdatabase/advancedoption/index.html +++ b/howtos/moviesdatabase/advancedoption/index.html @@ -4,7 +4,7 @@ 9. Advanced Option | The Home of Redis Developers - + @@ -12,7 +12,7 @@

9. Advanced Option

Create an index using a Filter

In the previous examples, the indices were created using a PREFIX, where all the keys matching the type and prefix are indexed.

It is also possible to create an index using a filter; for example, an index of all the "Drama" movies released between 1990 and 2000 (2000 not included).

The FILTER expression uses the aggregation filter syntax; for example, for the genre and release year it will be:

  • FILTER "@genre=='Drama' && @release_year>=1990 && @release_year<2000"

So when you create the index:

FT.CREATE idx:drama ON Hash PREFIX 1 "movie:" FILTER "@genre=='Drama' && @release_year>=1990 && @release_year<2000" SCHEMA title TEXT SORTABLE release_year NUMERIC SORTABLE

You can run the FT.INFO idx:drama command to look at the index definitions and statistics.

Notes

  • The PREFIX is not optional.
  • In this application this index is not very useful, since you can get the same data from idx:movie.

You can check that the data has been indexed by running the following queries that should return the same number of documents.

On idx:drama

> FT.SEARCH idx:drama "  @release_year:[1990 (2000]" LIMIT 0 0

1) (integer) 24

On idx"movie

> FT.SEARCH idx:movie "@genre:{Drama}  @release_year:[1990 (2000]" LIMIT 0 0

1) (integer) 24
- + \ No newline at end of file diff --git a/howtos/moviesdatabase/aggregation/index.html b/howtos/moviesdatabase/aggregation/index.html index ffd19003e9..68d14ca029 100644 --- a/howtos/moviesdatabase/aggregation/index.html +++ b/howtos/moviesdatabase/aggregation/index.html @@ -4,7 +4,7 @@ 8. Aggregations | The Home of Redis Developers - + @@ -12,7 +12,7 @@

8. Aggregations

A common need for applications, in addition to retrieving information as a document list, like you have done with the "FT.SEARCH" command, is to do some "aggregation".

For example if we look at the movie documents, you may want to retrieve the number of movies grouped by release year starting with the most recent ones.

For this, Redis Stack provides the FT.AGGREGATE command, with aggregations described as a data processing pipeline.

Let's check out some examples.

Group By & Sort By

Number of movies by year
> FT.AGGREGATE "idx:movie" "*" GROUPBY 1 @release_year REDUCE COUNT 0 AS nb_of_movies

1) (integer) 60
2) 1) "release_year"
2) "1964"
3) "nb_of_movies"
4) "9"
...
61) 1) "release_year"
2) "2010"
3) "nb_of_movies"
4) "15"

Number of movies by year from the most recent to the oldest
> FT.AGGREGATE "idx:movie" "*" GROUPBY 1 @release_year REDUCE COUNT 0 AS nb_of_movies SORTBY 2 @release_year DESC

1) (integer) 60
2) 1) "release_year"
2) "2019"
3) "nb_of_movies"
4) "14"
...
11) 1) "release_year"
2) "2010"
3) "nb_of_movies"
4) "15"

Number of movies by genre, with the total number of votes, and average rating
> FT.AGGREGATE idx:movie "*" GROUPBY 1 @genre REDUCE COUNT 0 AS nb_of_movies REDUCE SUM 1 votes AS nb_of_votes REDUCE AVG 1 rating AS avg_rating SORTBY 4 @avg_rating DESC @nb_of_votes DESC


1) (integer) 26
2) 1) "genre"
2) "fantasy"
3) "nb_of_movies"
4) "1"
5) "nb_of_votes"
6) "1500090"
7) "avg_rating"
8) "8.8"
...
11) 1) "genre"
2) "romance"
3) "nb_of_movies"
4) "2"
5) "nb_of_votes"
6) "746"
7) "avg_rating"
8) "6.65"

Count the number of female users by country, sorted from the largest to the smallest number.
> FT.AGGREGATE idx:user "@gender:{female}" GROUPBY 1 @country REDUCE COUNT 0 AS nb_of_users SORTBY 2 @nb_of_users DESC

1) (integer) 193
2) 1) "country"
2) "china"
3) "nb_of_users"
4) "537"
...
11) 1) "country"
2) "ukraine"
3) "nb_of_users"
4) "72"

Apply Functions

Number of logins per year and month

The idx:user index contains the last_login field. This field stores the last login time as an EPOC timestamp.

Redis Stack search aggregation allows you to apply transformations to each record. This is done using the APPLY parameter.

For this example you have to use a date/time function to extract the month and year from the timestamp.

> FT.AGGREGATE idx:user * APPLY year(@last_login) AS year APPLY "monthofyear(@last_login) + 1" AS month GROUPBY 2 @year @month REDUCE count 0 AS num_login SORTBY 4 @year ASC @month ASC

1) (integer) 13
2) 1) "year"
2) "2019"
3) "month"
4) "9"
5) "num_login"
6) "230"
...
14) 1) "year"
2) "2020"
3) "month"
4) "9"
5) "num_login"
6) "271"


Number of logins per weekday

Using the date/time Apply functions it is possible to extract the day of the week from the timestamp, so let's see how the logins are distributed over the week.

> FT.AGGREGATE idx:user * APPLY "dayofweek(@last_login) +1" AS dayofweek GROUPBY 1 @dayofweek REDUCE count 0 AS num_login SORTBY 2 @dayofweek ASC

1) (integer) 7
2) 1) "dayofweek"
2) "1"
3) "num_login"
4) "815"
...
8) 1) "dayofweek"
2) "7"
3) "num_login"
4) "906"


Filter

In the previous examples you used the query string parameter to select all documents ("*") or a subset of the documents ("@gender:{female}").

It is also possible to filter the results using a predicate expression relating to values in each result. This is applied post-query and relates to the current state of the pipeline. This is done using the FILTER parameter.

Count the number of female users by country, excluding China and countries with 100 users or fewer, sorted from largest to smallest
> FT.AGGREGATE idx:user "@gender:{female}" GROUPBY 1 @country  REDUCE COUNT 0 AS nb_of_users  FILTER "@country!='china' && @nb_of_users > 100" SORTBY 2 @nb_of_users DESC

1) (integer) 163
2) 1) "country"
2) "indonesia"
3) "nb_of_users"
4) "309"
...
6) 1) "country"
2) "brazil"
3) "nb_of_users"
4) "108"

Number of logins per month, for the year 2020

This is similar to the previous query with the addition of a filter on the year.

> FT.AGGREGATE idx:user * APPLY year(@last_login) AS year APPLY "monthofyear(@last_login) + 1" AS month GROUPBY 2 @year @month REDUCE count 0 AS num_login  FILTER "@year==2020" SORTBY 2 @month ASC

1) (integer) 13
2) 1) "year"
2) "2020"
3) "month"
4) "1"
5) "num_login"
6) "520"
...
10) 1) "year"
2) "2020"
3) "month"
4) "9"
5) "num_login"
6) "271"


- + \ No newline at end of file diff --git a/howtos/moviesdatabase/create/index.html b/howtos/moviesdatabase/create/index.html index db1fc83ed5..2301cc29fa 100644 --- a/howtos/moviesdatabase/create/index.html +++ b/howtos/moviesdatabase/create/index.html @@ -4,7 +4,7 @@ 3. Create Index | The Home of Redis Developers - + @@ -12,7 +12,7 @@

3. Create Index

Before creating the index let's describe the dataset and insert entries.

Sample Dataset

In this project you will use a simple dataset describing movies, for now, all records are in English. You will learn more about other languages in another tutorial.

A movie is represented by the following attributes:

  • movie_id : The unique ID of the movie, internal to this database
  • title : The title of the movie.
  • plot : A summary of the movie.
  • genre : The genre of the movie, for now a movie will only have a single genre.
  • release_year : The year the movie was released as a numerical value.
  • rating : A numeric value representing the public's rating for this movie.
  • votes : Number of votes.
  • poster : Link to the movie poster.
  • imdb_id : id of the movie in the IMDB database.

Key and Data structure

As a Redis developer, one of the first things to do when building your application is to define the structure of the keys and data (data design/data modeling).

A common way of defining the keys in Redis is to use specific patterns in them. For example in this application where the database will probably deal with various business objects: movies, actors, theaters, users, ... we can use the following pattern:

  • business_object:key

For example:

  • movie:001 for the movie with the id 001
  • user:001 the user with the id 001

For the movie information you should use a Redis hash.

A Redis Hash allows the application to structure all the movie attributes in individual fields; also Redis Stack will index the fields based on the index definition.

Insert Movies

It is now time to add some data to your database. Let's insert a few movies, using redis-cli or RedisInsight.

Once you are connected to your Redis instance run the following commands:


> HSET movie:11002 title "Star Wars: Episode V - The Empire Strikes Back" plot "After the Rebels are brutally overpowered by the Empire on the ice planet Hoth, Luke Skywalker begins Jedi training with Yoda, while his friends are pursued by Darth Vader and a bounty hunter named Boba Fett all over the galaxy." release_year 1980 genre "Action" rating 8.7 votes 1127635 imdb_id tt0080684

> HSET movie:11003 title "The Godfather" plot "The aging patriarch of an organized crime dynasty transfers control of his clandestine empire to his reluctant son." release_year 1972 genre "Drama" rating 9.2 votes 1563839 imdb_id tt0068646

> HSET movie:11004 title "Heat" plot "A group of professional bank robbers start to feel the heat from police when they unknowingly leave a clue at their latest heist." release_year 1995 genre "Thriller" rating 8.2 votes 559490 imdb_id tt0113277

> HSET "movie:11005" title "Star Wars: Episode VI - Return of the Jedi" genre "Action" votes 906260 rating 8.3 release_year 1983 plot "The Rebels dispatch to Endor to destroy the second Empire's Death Star." ibmdb_id "tt0086190"

Now it is possible to get information from the hash using the movie ID. For example, if you want to get the title and rating, execute the following command:

> HMGET movie:11002 title rating

1) "Star Wars: Episode V - The Empire Strikes Back"
2) "8.7"

And you can increment the rating of this movie using:

> HINCRBYFLOAT movie:11002 rating 0.1
"8.8"

But how do you get a movie or list of movies by year of release, rating or title?

One option would be to read all the movies, check all the fields and then return only the matching ones; needless to say, this is a really bad idea.

Nevertheless, this is where Redis developers have often created custom secondary indexes using SET/SORTED SET structures that point back to the movie hash. This requires significant design and implementation work.

This is where Search and Query in Redis Stack can help, and why it was created.

Search & Indexing

Redis Stack greatly simplifies this by offering a simple and automatic way to create secondary indices on Redis hashes (more data structures will eventually be supported).

Secondary Index

When using Redis Stack, if you want to query on a field, you must first index that field. Let's start by indexing the following fields for our movies:

  • Title
  • Release Year
  • Rating
  • Genre

When creating an index you define:

  • which data you want to index: all hashes with a key starting with movie:
  • which fields in the hashes you want to index using a Schema definition.

Warning: Do not index all fields

Indexes take space in memory, and must be updated when the primary data is updated. So create the index carefully and keep the definition up to date with your needs.

Create the Index

Create the index with the following command:

> FT.CREATE idx:movie ON hash PREFIX 1 "movie:" SCHEMA title TEXT SORTABLE release_year NUMERIC SORTABLE rating NUMERIC SORTABLE genre TAG SORTABLE

Before running some queries let's look at the command in detail:

  • FT.CREATE : creates an index with the given spec. The index name will be used in all the key names so keep it short.
  • idx:movie : the name of the index
  • ON hash : the type of structure to be indexed.
  • PREFIX 1 "movie:" : the prefix of the keys that should be indexed. This is a list, so since we want to only index movie:* keys the number is 1. Suppose you want to index movies and tv_show that have the same fields, you can use: PREFIX 2 "movie:" "tv_show:"
  • SCHEMA ...: defines the schema, the fields and their type, to index, as you can see in the command, we are using TEXT, NUMERIC and TAG, and SORTABLE parameters.

You can find information about the FT.CREATE command in the documentation.

You can look at the index information with the following command:

> FT.INFO idx:movie
- + \ No newline at end of file diff --git a/howtos/moviesdatabase/import/index.html b/howtos/moviesdatabase/import/index.html index 0f6f72a872..12ceb2ff1d 100644 --- a/howtos/moviesdatabase/import/index.html +++ b/howtos/moviesdatabase/import/index.html @@ -4,7 +4,7 @@ 6. Import datasets | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Sample Dataset

In the previous steps you used only a few movies; let's now import:

  • More movies to discover more queries.
  • Theaters to discover the geospatial capabilities.
  • Users to do some aggregations.

Dataset Description

Movies

The file https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_movies.redis is a script that creates 922 Hashes.

The movie hashes contain the following fields.

  • movie:id : The unique ID of the movie, internal to this database (used as the key of the hash)
  • title : The title of the movie.
  • plot : A summary of the movie.
  • genre : The genre of the movie, for now a movie will only have a single genre.
  • release_year : The year the movie was released as a numerical value.
  • rating : A numeric value representing the public's rating for this movie.
  • votes : Number of votes.
  • poster : Link to the movie poster.
  • imdb_id : id of the movie in the IMDB database.
<details>
<summary>Sample Data: <b>movie:343</b></summary>
<table>
<thead>
<tr>
<th>Field</th>
<th>Value</th>
</tr>
</thead>
<tbody>
<tr>
<th>title</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
Spider-Man
</td>
</tr>
<tr>
<th>plot</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
    When bitten by a genetically modified spider a nerdy shy and awkward high school student gains spider-like abilities that he eventually must use to fight evil as a superhero after tragedy befalls his family.
        </td>
</tr>
<tr>
<th>genre</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
Action
</td>
</tr>
<tr>
<th>release_year</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
2002
</td>
</tr>
<tr>
<th>rating</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
7.3
</td>
</tr>
<tr>
<th>votes</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
662219
</td>
</tr>
<tr>
<th>poster</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
https://m.media-amazon.com/images/M/MV5BZDEyN2NhMjgtMjdhNi00MmNlLWE5YTgtZGE4MzNjMTRlMGEwXkEyXkFqcGdeQXVyNDUyOTg3Njg@._V1_SX300.jpg
</td>
</tr>
<tr>
<th>imdb_id</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
tt0145487
</td>
</tr>
<tbody>
</table>
</details>

Theaters

The file https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_theaters.redis is a script that creates 117 Hashes (used for Geospatial queries). This dataset is a list of New York Theaters, and not movie theaters, but it is not that critical for this project ;).

The theater hashes contain the following fields.

  • theater:id : The unique ID of the theater, internal to this database (used as the key of the hash)
  • name : The name of the theater
  • address : The street address
  • city : The city, in this sample dataset all the theaters are in New York
  • zip : The zip code
  • phone : The phone number
  • url : The URL of the theater
  • location : Contains the longitude,latitude used to create the Geo-indexed field
<details>
<summary>Sample Data: <b>theater:20</b></summary>
<table>
<thead>
<tr>
<th>Field</th>
<th>Value</th>
</tr>
</thead>
<tbody>
<tr>
<th>name</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
Broadway Theatre
</td>
</tr>
<tr>
<th>address</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
1681 Broadway
</td>
</tr>
<tr>
<th>city</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
New York
</td>
</tr>
<tr>
<th>zip</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
10019
</td>
</tr>
<tr>
<th>phone</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
212 944-3700
</td>
</tr>
<tr>
<th>url</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
http://www.shubertorganization.com/theatres/broadway.asp
</td>
</tr>
<tr>
<th>location</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
-73.98335054631019,40.763270202723625
</td>
</tr>
<tbody>
</table>
</details>

Users

The file https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_users.redis is a script that creates 5996 Hashes.

The user hashes contain the following fields.

  • user:id : The unique ID of the user.
  • first_name : The first name of the user.
  • last_name : The last name of the user.
  • email : The email of the user.
  • gender : The gender of the user (female/male).
  • country : The country name of the user.
  • country_code : The country code of the user.
  • city : The city of the user.
  • longitude : The longitude of the user.
  • latitude : The latitude of the user.
  • last_login : The last login time for the user, as EPOC time.
  • ip_address : The IP address of the user.
<details>
<summary>Sample Data: <b>user:3233</b></summary>
<table>
<thead>
<tr>
<th>Field</th>
<th>Value</th>
</tr>
</thead>
<tbody>
<tr>
<th>first_name</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
Rosetta
</td>
</tr>
<tr>
<th>last_name</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
Olyff
</td>
</tr>
<tr>
<th>email</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
rolyff6g@163.com
</td>
</tr>
<tr>
<th>gender</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
female
</td>
</tr>
<tr>
<th>country</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
China
</td>
</tr>
<tr>
<th>country_code</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
CN
</td>
</tr>
<tr>
<th>city</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
Huangdao
</td>
</tr>
<tr>
<th>longitude</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
120.04619
</td>
</tr>
<tr>
<th>latitude</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
35.872664
</td>
</tr>
<tr>
<th>last_login</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
1570386621
</td>
</tr>
<tr>
<th>ip_address</th>
<td style='font-family:monospace; font-size: 0.875em; "'>
218.47.90.79
</td>
</tr>
<tbody>
</table>
</details>

Importing the Movies, Theaters and Users

Before importing the data, flush the database:

> FLUSHALL

The easiest way to import the file is to use the redis-cli, using the following terminal command:

$ curl -s https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_movies.redis | redis-cli -h localhost -p 6379 --pipe

$ curl -s https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_theaters.redis | redis-cli -h localhost -p 6379 --pipe


$ curl -s https://raw.githubusercontent.com/RediSearch/redisearch-getting-started/master/sample-app/redisearch-docker/dataset/import_users.redis | redis-cli -h localhost -p 6379 --pipe

Using Redis Insight or the redis-cli you can look at the dataset:

> HMGET "movie:343" title release_year genre

1) "Spider-Man"
2) "2002"
3) "Action"


> HMGET "theater:20" name location
1) "Broadway Theatre"
2) "-73.98335054631019,40.763270202723625"



> HMGET "user:343" first_name last_name last_login
1) "Umeko"
2) "Castagno"
3) "1574769122"

You can also use the DBSIZE command to see how many keys you have in your database.
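
If you started from an empty database, the three imports above should produce 7035 keys (922 movies + 117 theaters + 5996 users):

> DBSIZE
(integer) 7035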


Create Indexes

Create the idx:movie index:

> FT.CREATE idx:movie ON hash PREFIX 1 "movie:" SCHEMA title TEXT SORTABLE plot TEXT WEIGHT 0.5 release_year NUMERIC SORTABLE rating NUMERIC SORTABLE votes NUMERIC SORTABLE genre TAG SORTABLE

"OK"

The movies have now been indexed. You can run the FT.INFO "idx:movie" command and look at the num_docs value returned (it should be 922).

Create the idx:theater index:

This index will mostly be used to show the geospatial capabilities of search in Redis Stack.

In the previous examples we have created indexes with 3 types:

  • Text
  • Numeric
  • Tag

You will now discover a new type of field: Geo.

The theater hashes contain a location field with the longitude and latitude, which will be used in the index as follows:

> FT.CREATE idx:theater ON hash PREFIX 1 "theater:" SCHEMA name TEXT SORTABLE location GEO

"OK"

The theaters have been indexed. You can run the FT.INFO "idx:theater" command and look at the num_docs value returned (it should be 117).

Create the idx:user index:

> FT.CREATE idx:user ON hash PREFIX 1 "user:" SCHEMA gender TAG country TAG SORTABLE last_login NUMERIC SORTABLE location GEO

"OK"
- + \ No newline at end of file diff --git a/howtos/moviesdatabase/index.html b/howtos/moviesdatabase/index.html index 646ddae248..ead0a8cfd4 100644 --- a/howtos/moviesdatabase/index.html +++ b/howtos/moviesdatabase/index.html @@ -4,7 +4,7 @@ How to list and search Movies Database using Redis Stack | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to list and search Movies Database using Redis Stack

- + \ No newline at end of file diff --git a/howtos/moviesdatabase/install/index.html b/howtos/moviesdatabase/install/index.html index e5b24d6cfc..d9bdd9beb4 100644 --- a/howtos/moviesdatabase/install/index.html +++ b/howtos/moviesdatabase/install/index.html @@ -4,7 +4,7 @@ 2. Install Redis Stack | The Home of Redis Developers - + @@ -12,7 +12,7 @@

2. Install Redis Stack

You have multiple ways to run Redis Stack:

Let's use Docker for now.

1.1 Open a terminal and run the following command

> docker run -it --rm --name redis-stack-latest \
-p 6379:6379 \
redis/redis-stack:latest
note

The container will automatically be removed when it exits (--rm parameter).

You now have a Redis instance running with Redis Stack installed. Let's discover the basics.


- + \ No newline at end of file diff --git a/howtos/moviesdatabase/manage/index.html b/howtos/moviesdatabase/manage/index.html index 9e39b5be3d..3dd7af8917 100644 --- a/howtos/moviesdatabase/manage/index.html +++ b/howtos/moviesdatabase/manage/index.html @@ -4,7 +4,7 @@ 5. Manage Index | The Home of Redis Developers - + @@ -12,7 +12,7 @@

5. Manage Index

Listing and inspecting the indexes

The FT._LIST command provides a list of all indexes in your database:

> FT._LIST
1) "idx:movie"

FT.INFO provides information about a specific index:

> FT.INFO "idx:movie"

1) "index_name"
2) "idx:movie"
...
5) "index_definition"
...
7) "fields"
...
9) "num_docs"
10) "4"
...

Updating your Indexing

As you build your application and add more information to the database, you may need to add new fields to the index. The FT.ALTER command enables you to do this.

> FT.ALTER idx:movie SCHEMA ADD plot TEXT WEIGHT 0.5
"OK"

The WEIGHT declares the importance of this field when calculating result accuracy. This is a multiplication factor (default is 1); so in this example the plot is less important than the title.

Let's do a query with the new indexed field:

> FT.SEARCH idx:movie "empire @genre:{Action}" RETURN 2 title plot

Dropping the Index

You can drop an index using the FT.DROPINDEX command.

> FT.DROPINDEX idx:movie

"OK"

Dropping an index does not impact the indexed hashes, this means that the movies are still inside the database.

> SCAN 0 MATCH movie:*

1) "0"
2) 1) "movie:11002"
2) "movie:11004"
3) "movie:11003"
4) "movie:11005"
note

You can delete the indexed document/hashes by adding the DD parameter.
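
For example, the following drops the index and deletes the indexed movie hashes; only run it if you really want to remove the data:

> FT.DROPINDEX idx:movie DD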

- + \ No newline at end of file diff --git a/howtos/moviesdatabase/query/index.html b/howtos/moviesdatabase/query/index.html index 0512bf9ed6..9adf7ec129 100644 --- a/howtos/moviesdatabase/query/index.html +++ b/howtos/moviesdatabase/query/index.html @@ -4,7 +4,7 @@ 4. Query Data | The Home of Redis Developers - + @@ -12,7 +12,7 @@

4. Query Data

The database now contains a few movies and an index, so it is possible to execute some queries.

Queries

Example : All the movies that contains the string "war"

> FT.SEARCH idx:movie "war"

1) (integer) 2
2) "movie:11005"
3) 1) "title"
2) "Star Wars: Episode VI - Return of the Jedi"
...
14) "tt0086190"
4) "movie:11002"
5) 1) "title"
2) "Star Wars: Episode V - The Empire Strikes Back"
...
13) "imdb_id"
14) "tt0080684"

The FT.SEARCH command returns a list of results, starting with the number of results, followed by the list of elements (keys & fields).

As you can see the movie Star Wars: Episode V - The Empire Strikes Back is found, even though you used only the word “war” to match “Wars” in the title. This is because the title has been indexed as text, so the field is tokenized and stemmed.

Later when looking at the query syntax in more detail you will learn more about the search capabilities.

It is also possible to limit the list of fields returned by the query using the RETURN parameter, let's run the same query, and return only the title and release_year:

> FT.SEARCH idx:movie "war" RETURN 2 title release_year

1) (integer) 2
2) "movie:11005"
3) 1) "title"
2) "Star Wars: Episode VI - Return of the Jedi"
3) "release_year"
4) "1983"
4) "movie:11002"
5) 1) "title"
2) "Star Wars: Episode V - The Empire Strikes Back"
3) "release_year"
4) "1980"

This query does not specify any "field" and still returns some movies, this is because Search in Redis Stack will search all TEXT fields by default. In the current index only the title is present as a TEXT field. You will see later how to update an index, to add more fields to it.

If you need to perform a query on a specific field you can specify it using the @field: syntax, for example:

> FT.SEARCH idx:movie "@title:war" RETURN 2 title release_year

Example : All the movies that contains the string "war but NOT the jedi one"

Adding the string -jedi (minus) will ask the query engine not to return values that contain jedi.

> FT.SEARCH idx:movie "war -jedi" RETURN 2 title release_year

1) (integer) 1
2) "movie:11002"
3) 1) "title"
2) "Star Wars: Episode V - The Empire Strikes Back"
3) "release_year"
4) "1980"

Example : All the movies that contains the string "gdfather using fuzzy search"

As you can see, the word "gdfather" contains a spelling error; it can however be matched using fuzzy matching. Fuzzy matches are performed based on Levenshtein distance (LD).

> FT.SEARCH idx:movie " %gdfather% " RETURN 2 title release_year

1) (integer) 1
2) "movie:11003"
3) 1) "title"
2) "The Godfather"
3) "release_year"
4) "1972"

Example : All Thriller movies"

The genre fields is indexed as a TAG and allows exact match queries.

The syntax to query a TAG field is @field_name:{value}

> FT.SEARCH idx:movie "@genre:{Thriller}" RETURN 2 title release_year

1) (integer) 1
2) "movie:11004"
3) 1) "title"
2) "Heat"
3) "release_year"
4) "1995"


Example : All Thriller or Action movies"

> FT.SEARCH idx:movie "@genre:{Thriller|Action}" RETURN 2 title release_year

1) (integer) 3
2) "movie:11004"
3) 1) "title"
2) "Heat"
3) "release_year"
4) "1995"
4) "movie:11005"
5) 1) "title"
2) "Star Wars: Episode VI - Return of the Jedi"
3) "release_year"
4) "1983"
6) "movie:11002"
7) 1) "title"
2) "Star Wars: Episode V - The Empire Strikes Back"
3) "release_year"
4) "1980"

You can find more information about the Tag filters in the documentation.


Example : All Thriller or Action movies that do not have Jedi in the title

> FT.SEARCH idx:movie "@genre:{Thriller|Action} @title:-jedi" RETURN 2 title release_year

1) (integer) 2
2) "movie:11004"
3) 1) "title"
2) "Heat"
3) "release_year"
4) "1995"
4) "movie:11002"
5) 1) "title"
2) "Star Wars: Episode V - The Empire Strikes Back"
3) "release_year"
4) "1980"

Example : All the movies released between 1970 and 1980 (inclusive)

The FT.SEARCH syntax has two ways to query numeric fields:

  • using the FILTER parameter

or

  • using the @field in the query string.
> FT.SEARCH idx:movie * FILTER release_year 1970 1980 RETURN 2 title release_year
> FT.SEARCH idx:movie "@release_year:[1970 1980]" RETURN 2 title release_year

1) (integer) 2
2) "movie:11003"
3) 1) "title"
2) "The Godfather"
3) "release_year"
4) "1972"
4) "movie:11002"
5) 1) "title"
2) "Star Wars: Episode V - The Empire Strikes Back"
3) "release_year"
4) "1980"

To exclude a value, prepend it with ( in the FILTER or query string; for example, to exclude 1980:

> FT.SEARCH idx:movie "@release_year:[1970 (1980]" RETURN 2 title release_year

Insert, Update, Delete and Expire Documents

As part of this tutorial you have:

  1. Created a few movies as Redis hashes (which we call documents) with the key pattern movie:*
  2. Created an index using the FT.CREATE command
  3. Queried the data using FT.SEARCH

When creating the index, using the idx:movie ON hash PREFIX 1 "movie:" parameter you are asking the indexing engine to look at all existing keys and index them.

New information that matches this pattern/type will also be indexed.

Let's count the number of movies, add a new one, and count again:

> FT.SEARCH idx:movie "*" LIMIT 0 0

1) (integer) 4


> HSET movie:11033 title "Tomorrow Never Dies" plot "James Bond sets out to stop a media mogul's plan to induce war between China and the U.K in order to obtain exclusive global media coverage." release_year 1997 genre "Action" rating 6.5 votes 177732 imdb_id tt0120347

> FT.SEARCH idx:movie "*" LIMIT 0 0

1) (integer) 5

The new movie has been indexed. You can also search on any of the indexed fields:

> FT.SEARCH idx:movie "never" RETURN 2 title release_year

1) (integer) 1
2) "movie:11033"
3) 1) "title"
2) "Tomorrow Never Dies"
3) "release_year"
4) "1997"

Now update one of the fields, and search for 007:

> HSET movie:11033 title "Tomorrow Never Dies - 007"


> FT.SEARCH idx:movie "007" RETURN 2 title release_year

1) (integer) 1
2) "movie:11033"
3) 1) "title"
2) "Tomorrow Never Dies - 007"
3) "release_year"
4) "1997"

When you delete the hash, the index is also updated, and the same happens when the key expires (TTL-Time To Live).

For example, set the James Bond movie to expire in 20 seconds time:

> EXPIRE "movie:11033" 20

You can run the following query, and you will see that once the document expires (after 20 seconds) the search no longer returns any results, showing that the index has been updated.

> FT.SEARCH idx:movie "007" RETURN 2 title release_year

1) (integer)

tip

When you are using Redis as your primary database you are not necessarily using TTLs to delete records. However, if the data you are storing and indexing are transient (e.g. a caching layer on top of another datastore or web service, user session content, etc.), this is often described as an "Ephemeral Search" use case: lightweight, fast search with built-in expiration.


More

You have many additional features regarding indexing and searching that you can find in the documentation:

Let's see how to inspect, modify and drop an index.

- + \ No newline at end of file diff --git a/howtos/moviesdatabase/querymovies/index.html b/howtos/moviesdatabase/querymovies/index.html index 170df925b7..6d53889a10 100644 --- a/howtos/moviesdatabase/querymovies/index.html +++ b/howtos/moviesdatabase/querymovies/index.html @@ -4,7 +4,7 @@ 7. Query Movies | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Querying the Movie Dataset

As described earlier in the tutorial, one of the goals of search and query in Redis Stack is to provide rich querying capabilities such as:

  • simple and complex conditions
  • sorting
  • pagination
  • counting

Conditions

The best way to start working with Redis Stack query capabilities is to look at the various condition options.

> FT.SEARCH "idx:movie" "heat" RETURN 2 title plot

1) (integer) 4
2) "movie:1141"
3) 1) "title"
2) "Heat"
3) "plot"
4) "A group of professional bank robbers start to feel the heat from police when they unknowingly leave a clue at their latest heist."
4) "movie:818"
5) 1) "title"
2) "California Heat"
3) "plot"
4) "A lifeguard bets he can be true to just one woman."
6) "movie:736"
7) 1) "title"
2) "Chicago Justice"
3) "plot"
4) "The State's Attorney's dedicated team of prosecutors and investigators navigates heated city politics and controversy head-on,while fearlessly pursuing justice."
8) "movie:1109"
9) 1) "title"
2) "Love & Hip Hop: Miami"
3) "plot"
4) "'Love and Hip Hop Miami' turns up the heat and doesn't hold back in making the 305 the place to be. Multi-platinum selling hip-hop legend Trick Daddy is back in the studio collaborating ..."

The first line contains the number of documents (4) that match the query condition, then the list of movies.

This query is a "fieldless" condition, this means that the query engine has:

  • searched in all the TEXT fields of the index (title and plot)
  • for the word heat and related words; this is why movie:736 is returned, since it has the word heated in its plot (stemming)
  • returned the results sorted by score; remember that the title has a weight of 1.0 and the plot a weight of 0.5, so when the word or related words are found in the title the score is larger.

In this case you have to restrict the criteria to the title field using the @title notation.

> FT.SEARCH "idx:movie" "@title:heat" RETURN 2 title plot
1) (integer) 2
2) "movie:1141"
3) 1) "title"
2) "Heat"
3) "plot"
4) "A group of professional bank robbers start to feel the heat from police when they unknowingly leave a clue at their latest heist."
4) "movie:818"
5) 1) "title"
2) "California Heat"
3) "plot"
4) "A lifeguard bets he can be true to just one woman."

So only 2 movies are returned.

Find all the movies where the title contains 'heat' and does NOT contain 'california'

For this you add parentheses around the field condition and add the - sign to 'california'.

> FT.SEARCH "idx:movie" "@title:(heat -california)" RETURN 2 title plot
1) (integer) 1
2) "movie:1141"
3) 1) "title"
2) "Heat"
3) "plot"
4) "A group of professional bank robbers start to feel the heat from police when they unknowingly leave a clue at their latest heist."

Only one movie is returned.

If you do not put the ( .. ) the -california condition will be applied to all the text fields.

You can test this with the following queries:

> FT.SEARCH "idx:movie" "@title:(heat -woman)" RETURN 2 title plot
> FT.SEARCH "idx:movie" "@title:heat -woman" RETURN 2 title plot

As you can see, the first query only excludes woman from the title and returns both "Heat" and "California Heat", whereas the second query also eliminates "California Heat", since its plot contains the word woman.

- + \ No newline at end of file diff --git a/howtos/moviesdatabase/sampleapp/index.html b/howtos/moviesdatabase/sampleapp/index.html index d7e324cf0b..7519c61e36 100644 --- a/howtos/moviesdatabase/sampleapp/index.html +++ b/howtos/moviesdatabase/sampleapp/index.html @@ -4,7 +4,7 @@ 10. Sample Application | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Application Development

It is time now to see how to use search with Redis Stack in your application.

Run the Sample Application

The application and all the services, including Redis Stack, are available as a Docker Compose application.

If you have not already downloaded the project, clone it:

> git clone https://github.com/RediSearch/redisearch-getting-started.git

> cd redisearch-getting-started

To run the application:

> cd sample-app

> docker-compose up --force-recreate --build

This Docker Compose will start:

  1. A Redis Stack instance on port 6380, importing all movies and actors and creating the indexes
  2. The Java, Node and Python REST services on ports 8085, 8086 and 8087
  3. The frontend on port 8084

Once started you can access the application and its services using the following URLs:

  • http://localhost:8084
  • http://localhost:8085/api/1.0/movies/search?q=star&offset=0&limit=10
  • http://localhost:8086/api/1.0/movies/search?q=star&offset=0&limit=10
  • http://localhost:8087/api/1.0/movies/search?q=star&offset=0&limit=10
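
For example, you can quickly verify that one of the REST services is responding using curl (any of the three service ports works the same way):

> curl "http://localhost:8085/api/1.0/movies/search?q=star&offset=0&limit=10"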

Stop and Delete Everything

Run the following command to delete the containers & images:

> docker-compose down -v --rmi local --remove-orphans
- + \ No newline at end of file diff --git a/howtos/nlp/index.html b/howtos/nlp/index.html index d74f061457..ebe71decfe 100644 --- a/howtos/nlp/index.html +++ b/howtos/nlp/index.html @@ -4,7 +4,7 @@ Building a Pipeline for Natural Language Processing using RedisGears | The Home of Redis Developers - + @@ -16,7 +16,7 @@ One need to change a few default parameters for rgcluster to accommodate the size of PyTorch and spacy libraries (each over 1GB zipped), gist with settings.

Step 5. Change into the project directory

 cd ./the-pattern-platform/

Step 6. Create or activate a Python virtual environment

You can create a new environment via:

 conda create -n pattern_env python=3.8

Alternatively, you can activate an existing virtual environment and then install the dependencies:

 source ~/venv_cord19/bin/activate # or create a new venv
 pip install -r requirements.txt

Step 7. Run pipeline

 bash cluster_pipeline.sh

Step 8. Validating the functionality of the NLP pipeline

Wait for a bit and then check:

Verifying that RedisGraph is populated:

 redis-cli -p 9001 -h 127.0.0.1 GRAPH.QUERY cord19medical "MATCH (n:entity) RETURN count(n) as entity_count"
redis-cli -p 9001 -h 127.0.0.1 GRAPH.QUERY cord19medical "MATCH (e:entity)-[r]->(t:entity) RETURN count(r) as edge_count"

Checking that the API responds:

 curl -i -H "Content-Type: application/json" -X POST -d '{"search":"How does temperature and humidity affect the transmission of 2019-nCoV"}' http://localhost:8080/gsearch

Walkthrough

While RedisGears allows you to deploy and run machine learning libraries like spacy and BERT transformers, the solution above takes a simpler approach:

 gb = GB('KeysReader')            # read keys from the keyspace
 gb.filter(filter_language)       # keep only documents in English
 gb.flatmap(parse_paragraphs)     # split each article into paragraphs
 gb.map(spellcheck_sentences)     # correct spelling (symspell)
 gb.foreach(save_sentences)       # save sentences back into Redis
 gb.count()                       # return a count instead of the whole dataset
 gb.register('paragraphs:*', keyTypes=['string','hash'], mode="async_local")

This is the overall pipeline: those 7 lines allow you to run the logic in a distributed cluster or on a single machine using all available CPUs - no changes are required until you need to scale beyond 1,000 nodes. I use KeysReader registered for the paragraphs namespace, for all strings or hashes. My pipeline needs to run in async mode. For data scientists, I would recommend using gb.run first to make sure the Gears function works - it will run in batch mode - and then changing it to register to capture new data. By default, functions return their output, hence the need for count() - it prevents fetching the whole dataset back to the machine issuing the command (90 GB for Cord19).
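
As a sketch, that batch-mode validation run of the same pipeline differs only in the final call (assuming the same step functions are defined):

 gb = GB('KeysReader')
 gb.filter(filter_language)
 gb.flatmap(parse_paragraphs)
 gb.map(spellcheck_sentences)
 gb.foreach(save_sentences)
 gb.count()
 gb.run('paragraphs:*')  # batch mode over existing keys; switch back to register() to capture new data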

Overall, pre-processing is straightforward - the full code is here.

Things to keep in mind:

  1. A node process can only save locally - we don't move data, so anything you want to save should carry a hashtag. For example, to add to the set of processed_docs:
 execute('SADD', 'processed_docs_{%s}' % hashtag(), article_id)
  2. Loading external libraries into the computational thread takes extra steps. For example, symspell requires additional dictionaries and needs two steps to load:
 """
load symspell and relevant dictionaries
"""
sym_spell=None

def load_symspell():
import pkg_resources
from symspellpy import SymSpell, Verbosity
sym_spell = SymSpell(max_dictionary_edit_distance=1, prefix_length=7)
dictionary_path = pkg_resources.resource_filename(
"symspellpy", "frequency_dictionary_en_82_765.txt")
bigram_path = pkg_resources.resource_filename(
"symspellpy", "frequency_bigramdictionary_en_243_342.txt")
# term_index is the column of the term and count_index is the
# column of the term frequency
sym_spell.load_dictionary(dictionary_path, term_index=0, count_index=1)
sym_spell.load_bigram_dictionary(bigram_path, term_index=0, count_index=2)
return sym_spell
  3. Scispacy is a great library and data science tool, but after a few iterations of deploying it I ended up reading the data model documentation for the UMLS Metathesaurus and decided to build an Aho-Corasick automaton directly from the UMLS data (MRXW_ENG.RRF contains all term forms for English mapped to CUIs). Aho-Corasick allowed me to match incoming sentences into pairs of nodes (concepts from the medical dictionary) and present sentences as edges in a graph. The Gears-related code is simple:
 bg = GearsBuilder('KeysReader')
 bg.foreach(process_item)    # match each incoming sentence against the automaton
 bg.count()
 bg.register('sentence:*', mode="async_local", onRegistered=OnRegisteredAutomata)

OnRegisteredAutomata performs similarly to the symspell example above, except that it downloads a pre-built Aho-Corasick automaton (30 MB). Aho-Corasick is a very fast matcher that can process more than 900 MB of text per second, even on a commodity laptop, and a RedisGears cluster distributes the data and the ML model very smoothly, matching with the available CPU and memory. Full matcher code.
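
To illustrate the matching idea itself, here is a minimal sketch using the pyahocorasick library; the terms and CUIs below are invented for the example:

 import ahocorasick

 # build the automaton once from (term -> CUI) pairs, e.g. parsed from MRXW_ENG.RRF
 automaton = ahocorasick.Automaton()
 automaton.add_word("temperature", ("C0123456", "temperature"))  # illustrative CUI
 automaton.add_word("humidity", ("C0654321", "humidity"))        # illustrative CUI
 automaton.make_automaton()

 sentence = "how does temperature and humidity affect transmission"
 concepts = [value for _end, value in automaton.iter(sentence)]
 # each pair of matched concepts becomes a candidate edge, with the sentence attached
 print(concepts)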

Output of the matcher: nodes and edges are candidates for another RedisGears pattern, rgsync, where you can write fast into Redis while RedisGears replicates the data into slower storage using Redis Streams. Here, however, this demo uses streams directly and handcrafts the population of the RedisGraph database with nodes and edges, calculating the rank of each - which will be the focus of the next blog post.

Call to action

We took OCR scans in JSON format and turned them into a knowledge graph, demonstrating how you can apply a traditional Semantic Network/OWL/Metathesaurus technique based on the Unified Medical Language System. The Redis ecosystem offers a lot to the data science community and can take its place at the core of Kaggle notebooks and ML frameworks, making the deployment and distribution of data more enjoyable. The success of our industry depends on how our tools work together - regardless of whether they are engineering, data science, or machine learning tools, and whether the challenges are organisational or architectural.

With the collaboration of Redis Labs and the community, the full pipeline code is available via https://github.com/applied-knowledge-systems/the-pattern-platform. If you want to try it locally, you can find a Docker launch script in the root of the repository, along with a short quickstart guide. PRs and suggestions are welcome. The overall goal of the project is to allow others to build more interesting pipelines on top of it.

References

- + \ No newline at end of file diff --git a/howtos/popupstore/index.html b/howtos/popupstore/index.html index f0bc09d8b4..7efddf77df 100644 --- a/howtos/popupstore/index.html +++ b/howtos/popupstore/index.html @@ -4,7 +4,7 @@ Building a Popup Store application using Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Building a Popup Store application using Redis


Author: Ajeet Raina, Former Developer Growth Manager at Redis

Pop-up stores are becoming a popular channel for retailers to create a new revenue stream, generate buzz with customers, test product concepts, or unload excess inventory. Since the idea is to spin up the store quickly and then close it shortly thereafter, it doesn’t make sense to spend a lot of time on development. With the right Redis modules, you can create a robust customer experience without a lot of development effort.

This pop-up store demo illustrates a company that sells a single product and has 10,000 units available for purchase. Each customer can purchase one unit and the sale lasts only 10 minutes, so order processing must be instantaneous. The demo shows how to visualize a data pipeline in real time using Redis Streams, Redis Time Series, RedisGears, and the Redis Data Source for Grafana.

(Pop-up store demo screenshot)

Step 1. Cloning the repository

git clone https://github.com/redis-developer/redis-pop-up-store/

Step 2. Running the application

docker-compose up -d

Step 3. Accessing Grafana dashboard

Open http://IPAddress:3000 to access the Grafana dashboard.

Grafana

Grafana queries the streams and Time Series keys every 5 seconds to display samples using the Grafana Redis Data Source. This dashboard displays:

  • Product Available: the value of product key, which decreases as orders complete
  • Customers Ordering, Orders Processing, and Orders Completed: the length of queue:customers, queue:orders, and queue:complete streams
  • Customers Overflow: the difference between customer-submitted orders and orders completed
  • Customers Ordering: orders created in 5 seconds
  • Orders In Queue: orders waiting to be processed
  • Completed Flow: orders completed in 5 seconds
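
Under the hood, each of these panels boils down to a couple of Redis commands. For example, a queue-length panel could issue queries like the following (a sketch; the ts:len: keys are created by the Gears script shown below):

> XLEN queue:orders
> TS.RANGE ts:len:queue:orders - +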

How it works

Diagram

  • A Node.js script adds random data to the Customers and Orders streams
  • RedisGears uses a StreamReader to watch all queue: keys and add Time Series samples
# Add Time-Series
def tsAdd(x):
    xlen = execute('XLEN', x['key'])
    execute('TS.ADD', 'ts:len:' + x['key'], '*', xlen)
    execute('TS.ADD', 'ts:enqueue:' + x['key'], '*', x['value'])


# Stream Reader for any queue
gb = GearsBuilder('StreamReader')
gb.countby(lambda x: x['key']).map(tsAdd)
gb.register(prefix='queue:*', duration=5000, batch=10000, trimStream=False)
  • Another RedisGears script completes orders by:
    • adding data to the queue:complete stream
    • deleting the client's order from the customers queue
    • decreasing the product amount
    • trimming the Orders queue
# Complete order
def complete(x):
    execute('XADD', 'queue:complete', '*', 'order', x['id'],
            'customer', x['value']['customer'])
    execute('XDEL', 'queue:customers', x['value']['customer'])
    execute('DECR', 'product')


# Stream Reader for the Orders queue
gb = GearsBuilder('StreamReader')
gb.map(complete)
gb.register(prefix='queue:orders', batch=3, trimStream=True)

Additional Resources

- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheet/index.html b/howtos/quick-start/cheat-sheet/index.html index 50bea4968d..2320cd7f82 100644 --- a/howtos/quick-start/cheat-sheet/index.html +++ b/howtos/quick-start/cheat-sheet/index.html @@ -4,7 +4,7 @@ Redis Commands Cheat sheet | The Home of Redis Developers - + @@ -14,7 +14,7 @@

Redis Commands Cheat sheet


Author: Prasan Kumar, Technical Solutions Developer at Redis
Author: Will Johnston, Developer Growth Manager at Redis

Connect

# Syntax
redis-cli -u redis://host:port
redis-cli -u redis://username:password@host:port

# Examples
redis-cli
redis-cli -u redis://localhost:6379
redis-cli -u redis://myuser:mypassword@localhost:6379

# If you run Redis through Docker
docker exec -it <container-id-or-name> redis-cli

note

To set up Redis either locally or in the cloud, refer to the tutorial.

Strings/Numbers

SET
Syntax: SET key value
Example: SET myKey "Hello"
Output: "OK"
Description: Set key to hold the string value. If key already holds a value, it is overwritten, regardless of its type. Time Complexity: O(1)

GET
Syntax: GET key
Example: GET myKey
Output: "Hello"
Description: Get the string value of key. If the key does not exist the special value nil is returned. Time Complexity: O(1)

MGET
Syntax: MGET key [key ...]
Example: MGET myKey nonExistentKey
Output: 1) "Hello" 2) (nil)
Description: Returns the values of all specified keys. For every key that does not hold a string value or does not exist, the special value nil is returned. Time Complexity: O(N)

INCR
Syntax: INCR key
Example: INCR myCounter
Output: (integer) 1
Description: Increments the number stored at key by one. If the key does not exist, it is set to 0 before performing the operation. Time Complexity: O(1)

Generic

KEYS
Syntax: KEYS pattern
Example: KEYS my*
Output: 1) "myKey" 2) "myCounter"
Description: Returns all keys matching pattern. Time Complexity: O(N)

EXISTS
Syntax: EXISTS key [key ...]
Example: EXISTS myKey
Output: (integer) 1
Description: Checks if one or more keys exist. Time Complexity: O(N)

EXPIRE
Syntax: EXPIRE key seconds
Example: EXPIRE myKey 120
Output: (integer) 1
Description: Set a timeout on a key. After the timeout has expired, the key will automatically be deleted. Time Complexity: O(1)

TTL
Syntax: TTL key
Example: TTL myKey
Output: (integer) 113
Description: Returns the remaining time to live of a key that has a timeout. Time Complexity: O(1)

PERSIST
Syntax: PERSIST key
Example: PERSIST myKey
Output: (integer) 1
Description: Removes the expiration from a key. Time Complexity: O(1)

SCAN
Syntax: SCAN cursor [MATCH pattern] [COUNT count]
Example: SCAN 0 MATCH my* COUNT 2
Output: 1) "3" 2) 1) "myCounter" 2) "myKey"
Description: Iterates the set of keys in the currently selected Redis database. Time Complexity: O(1) for every call. O(N) for a complete iteration.

DEL
Syntax: DEL key [key ...]
Example: DEL myKey
Output: (integer) 1
Description: Removes the specified keys. Time Complexity: O(N)

INFO
Syntax: INFO [section]
Example: INFO server / INFO keyspace
Output:
# Server
redis_version:6.2.5
redis_git_sha1:00000000
redis_build_id:9893b2a-dirty
redis_mode:standalone
os:Linux 5.4.72-microsoft-standard-WSL2 x86_64
arch_bits:64
...
# Keyspace
db0:keys=2,expires=0,avg_ttl=0
Description: Returns information and statistics about the server, with different sections like server, clients, memory, persistence, stats, replication, cpu, commandstats, latencystats, sentinel, cluster, modules, keyspace, and errorstats. Time Complexity: O(1)

Hashes

HSET
Syntax: HSET key field value [field value ...]
Example: HSET h_employee_profile:101 name "Nicol" age 33
Output: (integer) 2
Description: Sets the specified fields to their respective values in the hash stored at key. Time Complexity: O(N)

HGET
Syntax: HGET key field
Example: HGET h_employee_profile:101 name
Output: "Nicol"
Description: Returns the value associated with field in the hash stored at key. Time Complexity: O(1)

HGETALL
Syntax: HGETALL key
Example: HGETALL h_employee_profile:101
Output: 1) "name" 2) "Nicol" 3) "age" 4) "33"
Description: Returns all fields and values of the hash stored at key. Time Complexity: O(N)

HMGET
Syntax: HMGET key field1 [field2]
Example: HMGET h_employee_profile:101 name age
Output: 1) "Nicol" 2) "33"
Description: Returns the values associated with the specified fields in the hash stored at key. Time Complexity: O(N)

Sets

SADD
Syntax: SADD key member [member ...]
Example: SADD mySet "Hello"
Output: (integer) 1
Description: Adds the specified members to the set stored at key. Time Complexity: O(N)

SMEMBERS
Syntax: SMEMBERS key
Example: SMEMBERS mySet
Output: 1) "Hello"
Description: Returns all the members of the set value stored at key. Time Complexity: O(N)

SCARD
Syntax: SCARD key
Example: SCARD mySet
Output: (integer) 1
Description: Returns the set cardinality (number of elements) of the set stored at key. Time Complexity: O(1)

SISMEMBER
Syntax: SISMEMBER key member
Example: SISMEMBER mySet "Hello"
Output: (integer) 1
Description: Returns if member is a member of the set stored at key. Time Complexity: O(1)

SDIFF
Syntax: SDIFF key1 [key2]
Example: SDIFF mySet myOtherSet
Output: 1) "Hello"
Description: Returns the members of the set resulting from the difference between the first set and all the successive sets. Time Complexity: O(N)

SDIFFSTORE
Syntax: SDIFFSTORE destination key1 [key2]
Example: SDIFFSTORE myNewSet mySet myOtherSet
Output: (integer) 1
Description: This command is equal to SDIFF, but instead of returning the resulting set, it is stored in destination. Time Complexity: O(N)

SREM
Syntax: SREM key member [member ...]
Example: SREM mySet "Hello"
Output: (integer) 1
Description: Removes the specified members from the set stored at key.

Sorted sets

ZADD
Syntax: ZADD key score member [score member ...]
Example: ZADD myZSet 1 "one" 2 "two"
Output: (integer) 2
Description: Adds all the specified members with the specified scores to the sorted set stored at key. Time Complexity: O(log(N))

ZRANGE
Syntax: ZRANGE key start stop [WITHSCORES]
Example: ZRANGE myZSet 0 -1
Output: 1) "one" 2) "two"
Description: Returns the specified range of elements in the sorted set stored at key. Time Complexity: O(log(N)+M) where M is the number of elements returned

Lists

LPUSH
Syntax: LPUSH key value [value ...]
Example: LPUSH myList "World"
Output: (integer) 1
Description: Inserts the specified values at the head of the list stored at key. Time Complexity: O(N)

RPUSH
Syntax: RPUSH key value [value ...]
Example: RPUSH myList "Hello"
Output: (integer) 2
Description: Inserts the specified values at the tail of the list stored at key. Time Complexity: O(N)

LRANGE
Syntax: LRANGE key start stop
Example: LRANGE myList 0 -1
Output: 1) "World" 2) "Hello"
Description: Returns the specified elements of the list stored at key. Time Complexity: O(S+N) where S is the distance of start and N is the number of elements in the specified range.

LLEN
Syntax: LLEN key
Example: LLEN myList
Output: (integer) 2
Description: Returns the length of the list stored at key. Time Complexity: O(1)

LPOP
Syntax: LPOP key [count]
Example: LPOP myList
Output: "World"
Description: Removes and returns the first element of the list stored at key. Time Complexity: O(N)

RPOP
Syntax: RPOP key [count]
Example: RPOP myList
Output: "Hello"
Description: Removes and returns the last element of the list stored at key. Time Complexity: O(N)

Streams

XADD
Syntax: XADD key field value [field value ...]
Example: XADD myStream * sensorId "1234" temperature "19.8"
Output: 1518951480106-0
Description: Appends the specified stream entry to the stream at the specified key. Time Complexity: O(1) when adding a new entry.

XREAD
Syntax: XREAD [COUNT count] [BLOCK milliseconds] STREAMS key [key ...] ID [ID ...]
Example: XREAD COUNT 2 STREAMS myStream 0
Output: 1) 1) "myStream" 2) 1) 1) "1518951480106-0" 2) 1) "sensorId" 2) "1234" 3) "temperature" 4) "19.8"
Description: Read data from one or multiple streams, only returning entries with an ID greater than the last received ID reported by the caller.

XRANGE
Syntax: XRANGE key start end [COUNT count]
Example: XRANGE myStream 1518951480106-0 1518951480106-0
Output: 1) 1) 1) "1518951480106-0" 2) 1) "sensorId" 2) "1234" 3) "temperature" 4) "19.8"
Description: Returns the entries matching a range of IDs in a stream. Time Complexity: O(N) with N being the number of elements being returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1).

XLEN
Syntax: XLEN key
Example: XLEN myStream
Output: (integer) 1
Description: Returns the number of entries of a stream. Time Complexity: O(1)

XDEL
Syntax: XDEL key ID [ID ...]
Example: XDEL myStream 1518951480106-0
Output: (integer) 1
Description: Removes the specified entries from a stream. Time Complexity: O(1) for each single item to delete in the stream

XTRIM
Syntax: XTRIM key MAXLEN [~] count
Example: XTRIM myStream MAXLEN 0
Output: (integer) 0
Description: Trims the stream to a different length. Time Complexity: O(N), with N being the number of evicted entries. Constant times are very small however, since entries are organized in macro nodes containing multiple entries that can be released with a single deallocation.

 


Redis stack commands

Redis Stack extends the core features of Redis OSS with capabilities such as querying across hashes and JSON documents, time series data support, and full-text search.

JSON

JSON.SET
Syntax: JSON.SET key path value
Example: JSON.SET employee_profile:1 . '{"name":"Alice"}'
Output: OK
Description: Sets JSON value at path in key. Time Complexity: O(M+N) where M is the original size and N is the new size

JSON.GET
Syntax: JSON.GET key [path [path ...]]
Example: JSON.GET employee_profile:1
Output: { "name": 'Alice' }
Description: Returns the JSON value at path in key. Time Complexity: O(N) when path is evaluated to a single value where N is the size of the value, O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.NUMINCRBY
Syntax: JSON.NUMINCRBY key path number
Example: JSON.SET employee_profile:1 .age 30, then JSON.NUMINCRBY employee_profile:1 .age 5
Output: 35
Description: Increments a number inside a JSON document. Time Complexity: O(1) when path is evaluated to a single value, O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.OBJKEYS
Syntax: JSON.OBJKEYS key [path]
Example: JSON.OBJKEYS employee_profile:1
Output: 1) "name" 2) "age"
Description: Return the keys in the object that's referenced by path. Time Complexity: O(N) when path is evaluated to a single value, where N is the number of keys in the object, O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.OBJLEN
Syntax: JSON.OBJLEN key [path]
Example: JSON.OBJLEN employee_profile:1
Output: (integer) 2
Description: Report the number of keys in the JSON object at path in key. Time Complexity: O(1) when path is evaluated to a single value, O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.ARRAPPEND
Syntax: JSON.ARRAPPEND key [path] value [value ...]
Example: JSON.SET employee_profile:1 .colors '["red", "green", "blue"]', then JSON.ARRAPPEND employee_profile:1 .colors '"yellow"'
Output: (integer) 4
Description: Append the json values into the array at path after the last element in it. Time Complexity: O(1) for each value added, O(N) for multiple values added where N is the size of the key

JSON.ARRINSERT
Syntax: JSON.ARRINSERT key path index value [value ...]
Example: JSON.ARRINSERT employee_profile:1 .colors 2 '"purple"'
Output: (integer) 5
Description: Insert the json values into the array at path before the index (shifts to the right). Time Complexity: O(N) when path is evaluated to a single value where N is the size of the array, O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.ARRINDEX
Syntax: JSON.ARRINDEX key path value [start [stop]]
Example: JSON.ARRINDEX employee_profile:1 .colors '"purple"'
Output: (integer) 2
Description: Searches for the first occurrence of a JSON value in an array. Time Complexity: O(N) when path is evaluated to a single value where N is the size of the array, O(N) when path is evaluated to multiple values, where N is the size of the key

Search and Query

FT.CREATE
Syntax:
FT.CREATE index
[ON HASH | JSON]
[PREFIX count prefix [prefix ...]]
[FILTER {filter}]
SCHEMA
field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [SORTABLE [UNF]]
[NOINDEX]
...
Example:
FT.CREATE staff:index
ON JSON
PREFIX 1 staff:
SCHEMA
"$.name" AS name TEXT
"$.age" AS age NUMERIC
"$.isSingle" AS isSingle TAG
'$["skills"][*]' AS skills TAG SEPARATOR "|"
Output: OK
Description: Create an index with the given specification. Time Complexity: O(K) where K is the number of fields in the document, O(N) for keys in the keyspace

FT.SEARCH
Syntax:
FT.SEARCH index query
[FILTER numeric_field min max [ FILTER numeric_field min max ...]]
[RETURN count identifier [AS property] [ identifier [AS property] ...]]
[SORTBY sortby [ ASC | DESC] [WITHCOUNT]]
[LIMIT offset num]
[PARAMS nargs name value [ name value ...]]
Example:
JSON.SET "staff:1" "$" '{"name":"Bob","age":22,"isSingle":true,"skills":["NodeJS","MongoDB","React"]}'
JSON.SET "staff:2" "$" '{"name":"Alex","age":45,"isSingle":true,"skills":["Python","MySQL","Angular"]}'
FT.SEARCH staff:index "(@name:'alex')" RETURN 1 $ LIMIT 0 10
FT.SEARCH staff:index "((@isSingle:{true}) (@age:[(18 +inf]))" RETURN 1 $ LIMIT 0 10
Output: Matching documents data
Description: Search the index with a query, returning either documents or just ids. Time Complexity: O(N)

FT.AGGREGATE
Syntax:
FT.AGGREGATE index query
[LOAD count field [field ...]]
[GROUPBY nargs property [property ...] [REDUCE function nargs arg [arg ...] [AS name]] ...]
[SORTBY nargs [property ASC | DESC [property ASC | DESC ...]] [MAX num] [WITHCOUNT]]
[APPLY expression AS name ...]
[LIMIT offset num]
[FILTER filter]
[PARAMS nargs name value [name value ...]]
Example:
FT.AGGREGATE staff:index "(@age:[(18 +inf])"
GROUPBY 1 @age
REDUCE COUNT_DISTINCT 1 @name AS staff_count
Output:
| age | staff_count |
| --- | ----------- |
| 22  | 1           |
| 45  | 1           |
Description: Run a search query on an index, and perform aggregate transformations on the results.

FT.INFO
Syntax: FT.INFO index
Example: FT.INFO staff:index
Output: A list of configuration parameters and stats for the index.
Description: Return information and statistics on the index. Time Complexity: O(1)

FT.DROPINDEX
Syntax: FT.DROPINDEX index [DD]
Example: FT.DROPINDEX staff:index
Output: OK
Description: Drop an existing index. Time Complexity: O(1), or O(N) if documents are deleted, where N is the number of keys in the keyspace
- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/connect/index.html b/howtos/quick-start/cheat-sheets/connect/index.html index 0c064e860e..0afcb7af31 100644 --- a/howtos/quick-start/cheat-sheets/connect/index.html +++ b/howtos/quick-start/cheat-sheets/connect/index.html @@ -4,7 +4,7 @@ connect | The Home of Redis Developers - + @@ -12,7 +12,7 @@

connect

# Syntax
redis-cli -u redis://host:port
redis-cli -u redis://username:password@host:port

# Examples
redis-cli
redis-cli -u redis://localhost:6379
redis-cli -u redis://myuser:mypassword@localhost:6379

# If you run Redis through Docker
docker exec -it <container-id-or-name> redis-cli

- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/generic/index.html b/howtos/quick-start/cheat-sheets/generic/index.html index 6110bceb11..cc38fb8d3a 100644 --- a/howtos/quick-start/cheat-sheets/generic/index.html +++ b/howtos/quick-start/cheat-sheets/generic/index.html @@ -4,7 +4,7 @@ generic | The Home of Redis Developers - + @@ -12,7 +12,7 @@

generic

KEYS
Syntax: KEYS pattern
Example: KEYS my*
Output: 1) "myKey" 2) "myCounter"
Description: Returns all keys matching pattern. Time Complexity: O(N)

EXISTS
Syntax: EXISTS key [key ...]
Example: EXISTS myKey
Output: (integer) 1
Description: Checks if one or more keys exist. Time Complexity: O(N)

EXPIRE
Syntax: EXPIRE key seconds
Example: EXPIRE myKey 120
Output: (integer) 1
Description: Set a timeout on a key. After the timeout has expired, the key will automatically be deleted. Time Complexity: O(1)

TTL
Syntax: TTL key
Example: TTL myKey
Output: (integer) 113
Description: Returns the remaining time to live of a key that has a timeout. Time Complexity: O(1)

PERSIST
Syntax: PERSIST key
Example: PERSIST myKey
Output: (integer) 1
Description: Removes the expiration from a key. Time Complexity: O(1)

SCAN
Syntax: SCAN cursor [MATCH pattern] [COUNT count]
Example: SCAN 0 MATCH my* COUNT 2
Output: 1) "3" 2) 1) "myCounter" 2) "myKey"
Description: Iterates the set of keys in the currently selected Redis database. Time Complexity: O(1) for every call. O(N) for a complete iteration.

DEL
Syntax: DEL key [key ...]
Example: DEL myKey
Output: (integer) 1
Description: Removes the specified keys. Time Complexity: O(N)

INFO
Syntax: INFO [section]
Example: INFO server / INFO keyspace
Output:
# Server
redis_version:6.2.5
redis_git_sha1:00000000
redis_build_id:9893b2a-dirty
redis_mode:standalone
os:Linux 5.4.72-microsoft-standard-WSL2 x86_64
arch_bits:64
...
# Keyspace
db0:keys=2,expires=0,avg_ttl=0
Description: Returns information and statistics about the server, with different sections like server, clients, memory, persistence, stats, replication, cpu, commandstats, latencystats, sentinel, cluster, modules, keyspace, and errorstats. Time Complexity: O(1)
- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/hashes/index.html b/howtos/quick-start/cheat-sheets/hashes/index.html index 8c23c96cf5..6e6eb1d23a 100644 --- a/howtos/quick-start/cheat-sheets/hashes/index.html +++ b/howtos/quick-start/cheat-sheets/hashes/index.html @@ -4,7 +4,7 @@ hashes | The Home of Redis Developers - + @@ -12,7 +12,7 @@

hashes

HSET
Syntax: HSET key field value [field value ...]
Example: HSET h_employee_profile:101 name "Nicol" age 33
Output: (integer) 2
Description: Sets the specified fields to their respective values in the hash stored at key. Time Complexity: O(N)

HGET
Syntax: HGET key field
Example: HGET h_employee_profile:101 name
Output: "Nicol"
Description: Returns the value associated with field in the hash stored at key. Time Complexity: O(1)

HGETALL
Syntax: HGETALL key
Example: HGETALL h_employee_profile:101
Output: 1) "name" 2) "Nicol" 3) "age" 4) "33"
Description: Returns all fields and values of the hash stored at key. Time Complexity: O(N)

HMGET
Syntax: HMGET key field1 [field2]
Example: HMGET h_employee_profile:101 name age
Output: 1) "Nicol" 2) "33"
Description: Returns the values associated with the specified fields in the hash stored at key. Time Complexity: O(N)
- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/json/index.html b/howtos/quick-start/cheat-sheets/json/index.html index f0f00d273d..e878719e84 100644 --- a/howtos/quick-start/cheat-sheets/json/index.html +++ b/howtos/quick-start/cheat-sheets/json/index.html @@ -4,7 +4,7 @@ json | The Home of Redis Developers - + @@ -12,7 +12,7 @@

json

JSON.SET
Syntax: JSON.SET key path value
Example: JSON.SET employee_profile:1 . '{"name":"Alice"}'
Output: OK
Description: Sets JSON value at path in key. Time Complexity: O(M+N) where M is the original size and N is the new size

JSON.GET
Syntax: JSON.GET key [path [path ...]]
Example: JSON.GET employee_profile:1
Output: { "name": 'Alice' }
Description: Returns the JSON value at path in key. Time Complexity: O(N) when path is evaluated to a single value where N is the size of the value, O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.NUMINCRBY
Syntax: JSON.NUMINCRBY key path number
Example: JSON.SET employee_profile:1 .age 30, then JSON.NUMINCRBY employee_profile:1 .age 5
Output: 35
Description: Increments a number inside a JSON document. Time Complexity: O(1) when path is evaluated to a single value, O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.OBJKEYS
Syntax: JSON.OBJKEYS key [path]
Example: JSON.OBJKEYS employee_profile:1
Output: 1) "name" 2) "age"
Description: Return the keys in the object that's referenced by path. Time Complexity: O(N) when path is evaluated to a single value, where N is the number of keys in the object, O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.OBJLEN
Syntax: JSON.OBJLEN key [path]
Example: JSON.OBJLEN employee_profile:1
Output: (integer) 2
Description: Report the number of keys in the JSON object at path in key. Time Complexity: O(1) when path is evaluated to a single value, O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.ARRAPPEND
Syntax: JSON.ARRAPPEND key [path] value [value ...]
Example: JSON.SET employee_profile:1 .colors '["red", "green", "blue"]', then JSON.ARRAPPEND employee_profile:1 .colors '"yellow"'
Output: (integer) 4
Description: Append the json values into the array at path after the last element in it. Time Complexity: O(1) for each value added, O(N) for multiple values added where N is the size of the key

JSON.ARRINSERT
Syntax: JSON.ARRINSERT key path index value [value ...]
Example: JSON.ARRINSERT employee_profile:1 .colors 2 '"purple"'
Output: (integer) 5
Description: Insert the json values into the array at path before the index (shifts to the right). Time Complexity: O(N) when path is evaluated to a single value where N is the size of the array, O(N) when path is evaluated to multiple values, where N is the size of the key

JSON.ARRINDEX
Syntax: JSON.ARRINDEX key path value [start [stop]]
Example: JSON.ARRINDEX employee_profile:1 .colors '"purple"'
Output: (integer) 2
Description: Searches for the first occurrence of a JSON value in an array. Time Complexity: O(N) when path is evaluated to a single value where N is the size of the array, O(N) when path is evaluated to multiple values, where N is the size of the key
- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/lists/index.html b/howtos/quick-start/cheat-sheets/lists/index.html index 28aa4f0f6d..66733fa22f 100644 --- a/howtos/quick-start/cheat-sheets/lists/index.html +++ b/howtos/quick-start/cheat-sheets/lists/index.html @@ -4,7 +4,7 @@ lists | The Home of Redis Developers - + @@ -12,7 +12,7 @@

lists

LPUSH
Syntax: LPUSH key value [value ...]
Example: LPUSH myList "World"
Output: (integer) 1
Description: Inserts the specified values at the head of the list stored at key. Time Complexity: O(N)

RPUSH
Syntax: RPUSH key value [value ...]
Example: RPUSH myList "Hello"
Output: (integer) 2
Description: Inserts the specified values at the tail of the list stored at key. Time Complexity: O(N)

LRANGE
Syntax: LRANGE key start stop
Example: LRANGE myList 0 -1
Output: 1) "World" 2) "Hello"
Description: Returns the specified elements of the list stored at key. Time Complexity: O(S+N) where S is the distance of start and N is the number of elements in the specified range.

LLEN
Syntax: LLEN key
Example: LLEN myList
Output: (integer) 2
Description: Returns the length of the list stored at key. Time Complexity: O(1)

LPOP
Syntax: LPOP key [count]
Example: LPOP myList
Output: "World"
Description: Removes and returns the first element of the list stored at key. Time Complexity: O(N)

RPOP
Syntax: RPOP key [count]
Example: RPOP myList
Output: "Hello"
Description: Removes and returns the last element of the list stored at key. Time Complexity: O(N)
- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/search-and-query/index.html b/howtos/quick-start/cheat-sheets/search-and-query/index.html index 077733152a..c8eaf318e7 100644 --- a/howtos/quick-start/cheat-sheets/search-and-query/index.html +++ b/howtos/quick-start/cheat-sheets/search-and-query/index.html @@ -4,7 +4,7 @@ search-and-query | The Home of Redis Developers - + @@ -12,7 +12,7 @@

search-and-query

FT.CREATE
Syntax:
FT.CREATE index
[ON HASH | JSON]
[PREFIX count prefix [prefix ...]]
[FILTER {filter}]
SCHEMA
field_name [AS alias] TEXT | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [SORTABLE [UNF]]
[NOINDEX]
...
Example:
FT.CREATE staff:index
ON JSON
PREFIX 1 staff:
SCHEMA
"$.name" AS name TEXT
"$.age" AS age NUMERIC
"$.isSingle" AS isSingle TAG
'$["skills"][*]' AS skills TAG SEPARATOR "|"
Output: OK
Description: Create an index with the given specification. Time Complexity: O(K) where K is the number of fields in the document, O(N) for keys in the keyspace

FT.SEARCH
Syntax:
FT.SEARCH index query
[FILTER numeric_field min max [ FILTER numeric_field min max ...]]
[RETURN count identifier [AS property] [ identifier [AS property] ...]]
[SORTBY sortby [ ASC | DESC] [WITHCOUNT]]
[LIMIT offset num]
[PARAMS nargs name value [ name value ...]]
Example:
JSON.SET "staff:1" "$" '{"name":"Bob","age":22,"isSingle":true,"skills":["NodeJS","MongoDB","React"]}'
JSON.SET "staff:2" "$" '{"name":"Alex","age":45,"isSingle":true,"skills":["Python","MySQL","Angular"]}'
FT.SEARCH staff:index "(@name:'alex')" RETURN 1 $ LIMIT 0 10
FT.SEARCH staff:index "((@isSingle:{true}) (@age:[(18 +inf]))" RETURN 1 $ LIMIT 0 10
Output: Matching documents data
Description: Search the index with a query, returning either documents or just ids. Time Complexity: O(N)

FT.AGGREGATE
Syntax:
FT.AGGREGATE index query
[LOAD count field [field ...]]
[GROUPBY nargs property [property ...] [REDUCE function nargs arg [arg ...] [AS name]] ...]
[SORTBY nargs [property ASC | DESC [property ASC | DESC ...]] [MAX num] [WITHCOUNT]]
[APPLY expression AS name ...]
[LIMIT offset num]
[FILTER filter]
[PARAMS nargs name value [name value ...]]
Example:
FT.AGGREGATE staff:index "(@age:[(18 +inf])"
GROUPBY 1 @age
REDUCE COUNT_DISTINCT 1 @name AS staff_count
Output:
| age | staff_count |
| --- | ----------- |
| 22  | 1           |
| 45  | 1           |
Description: Run a search query on an index, and perform aggregate transformations on the results.

FT.INFO
Syntax: FT.INFO index
Example: FT.INFO staff:index
Output: A list of configuration parameters and stats for the index.
Description: Return information and statistics on the index. Time Complexity: O(1)

FT.DROPINDEX
Syntax: FT.DROPINDEX index [DD]
Example: FT.DROPINDEX staff:index
Output: OK
Description: Drop an existing index. Time Complexity: O(1), or O(N) if documents are deleted, where N is the number of keys in the keyspace
- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/sets/index.html b/howtos/quick-start/cheat-sheets/sets/index.html index 2ee3df0f91..bb6842bc7b 100644 --- a/howtos/quick-start/cheat-sheets/sets/index.html +++ b/howtos/quick-start/cheat-sheets/sets/index.html @@ -4,7 +4,7 @@ sets | The Home of Redis Developers - + @@ -12,7 +12,7 @@

sets

SADD
Syntax: SADD key member [member ...]
Example: SADD mySet "Hello"
Output: (integer) 1
Description: Adds the specified members to the set stored at key. Time Complexity: O(N)

SMEMBERS
Syntax: SMEMBERS key
Example: SMEMBERS mySet
Output: 1) "Hello"
Description: Returns all the members of the set value stored at key. Time Complexity: O(N)

SCARD
Syntax: SCARD key
Example: SCARD mySet
Output: (integer) 1
Description: Returns the set cardinality (number of elements) of the set stored at key. Time Complexity: O(1)

SISMEMBER
Syntax: SISMEMBER key member
Example: SISMEMBER mySet "Hello"
Output: (integer) 1
Description: Returns if member is a member of the set stored at key. Time Complexity: O(1)

SDIFF
Syntax: SDIFF key1 [key2]
Example: SDIFF mySet myOtherSet
Output: 1) "Hello"
Description: Returns the members of the set resulting from the difference between the first set and all the successive sets. Time Complexity: O(N)

SDIFFSTORE
Syntax: SDIFFSTORE destination key1 [key2]
Example: SDIFFSTORE myNewSet mySet myOtherSet
Output: (integer) 1
Description: This command is equal to SDIFF, but instead of returning the resulting set, it is stored in destination. Time Complexity: O(N)

SREM
Syntax: SREM key member [member ...]
Example: SREM mySet "Hello"
Output: (integer) 1
Description: Removes the specified members from the set stored at key.
- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/sorted-sets/index.html b/howtos/quick-start/cheat-sheets/sorted-sets/index.html index 47a9d52a1b..3cf5b5dcd5 100644 --- a/howtos/quick-start/cheat-sheets/sorted-sets/index.html +++ b/howtos/quick-start/cheat-sheets/sorted-sets/index.html @@ -4,7 +4,7 @@ sorted-sets | The Home of Redis Developers - + @@ -12,7 +12,7 @@

sorted-sets

ZADD
Syntax: ZADD key score member [score member ...]
Example: ZADD myZSet 1 "one" 2 "two"
Output: (integer) 2
Description: Adds all the specified members with the specified scores to the sorted set stored at key. Time Complexity: O(log(N))

ZRANGE
Syntax: ZRANGE key start stop [WITHSCORES]
Example: ZRANGE myZSet 0 -1
Output: 1) "one" 2) "two"
Description: Returns the specified range of elements in the sorted set stored at key. Time Complexity: O(log(N)+M) where M is the number of elements returned
- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/streams/index.html b/howtos/quick-start/cheat-sheets/streams/index.html index 2dd131d9df..67d8e7f30e 100644 --- a/howtos/quick-start/cheat-sheets/streams/index.html +++ b/howtos/quick-start/cheat-sheets/streams/index.html @@ -4,7 +4,7 @@ streams | The Home of Redis Developers - + @@ -12,7 +12,7 @@

streams

XADD
Syntax: XADD key field value [field value ...]
Example: XADD myStream * sensorId "1234" temperature "19.8"
Output: 1518951480106-0
Description: Appends the specified stream entry to the stream at the specified key. Time Complexity: O(1) when adding a new entry.

XREAD
Syntax: XREAD [COUNT count] [BLOCK milliseconds] STREAMS key [key ...] ID [ID ...]
Example: XREAD COUNT 2 STREAMS myStream 0
Output: 1) 1) "myStream" 2) 1) 1) "1518951480106-0" 2) 1) "sensorId" 2) "1234" 3) "temperature" 4) "19.8"
Description: Read data from one or multiple streams, only returning entries with an ID greater than the last received ID reported by the caller.

XRANGE
Syntax: XRANGE key start end [COUNT count]
Example: XRANGE myStream 1518951480106-0 1518951480106-0
Output: 1) 1) 1) "1518951480106-0" 2) 1) "sensorId" 2) "1234" 3) "temperature" 4) "19.8"
Description: Returns the entries matching a range of IDs in a stream. Time Complexity: O(N) with N being the number of elements being returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1).

XLEN
Syntax: XLEN key
Example: XLEN myStream
Output: (integer) 1
Description: Returns the number of entries of a stream. Time Complexity: O(1)

XDEL
Syntax: XDEL key ID [ID ...]
Example: XDEL myStream 1518951480106-0
Output: (integer) 1
Description: Removes the specified entries from a stream. Time Complexity: O(1) for each single item to delete in the stream

XTRIM
Syntax: XTRIM key MAXLEN [~] count
Example: XTRIM myStream MAXLEN 0
Output: (integer) 0
Description: Trims the stream to a different length. Time Complexity: O(N), with N being the number of evicted entries. Constant times are very small however, since entries are organized in macro nodes containing multiple entries that can be released with a single deallocation.
- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/strings/index.html b/howtos/quick-start/cheat-sheets/strings/index.html index 81bd30240f..22ca1c0513 100644 --- a/howtos/quick-start/cheat-sheets/strings/index.html +++ b/howtos/quick-start/cheat-sheets/strings/index.html @@ -4,7 +4,7 @@ strings | The Home of Redis Developers - + @@ -12,7 +12,7 @@

strings

SET
Syntax: SET key value
Example: SET myKey "Hello"
Output: "OK"
Description: Set key to hold the string value. If key already holds a value, it is overwritten, regardless of its type. Time Complexity: O(1)

GET
Syntax: GET key
Example: GET myKey
Output: "Hello"
Description: Get the string value of key. If the key does not exist the special value nil is returned. Time Complexity: O(1)

MGET
Syntax: MGET key [key ...]
Example: MGET myKey nonExistentKey
Output: 1) "Hello" 2) (nil)
Description: Returns the values of all specified keys. For every key that does not hold a string value or does not exist, the special value nil is returned. Time Complexity: O(N)

INCR
Syntax: INCR key
Example: INCR myCounter
Output: (integer) 1
Description: Increments the number stored at key by one. If the key does not exist, it is set to 0 before performing the operation. Time Complexity: O(1)
- + \ No newline at end of file diff --git a/howtos/quick-start/cheat-sheets/triggers-and-functions/index.html b/howtos/quick-start/cheat-sheets/triggers-and-functions/index.html index 472c1e8851..5d4e944e2b 100644 --- a/howtos/quick-start/cheat-sheets/triggers-and-functions/index.html +++ b/howtos/quick-start/cheat-sheets/triggers-and-functions/index.html @@ -4,7 +4,7 @@ triggers-and-functions | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/howtos/quick-start/index.html b/howtos/quick-start/index.html index c4ad6c6db5..35bff1909d 100644 --- a/howtos/quick-start/index.html +++ b/howtos/quick-start/index.html @@ -4,7 +4,7 @@ Getting Started | The Home of Redis Developers - + @@ -14,7 +14,7 @@

Getting Started


Author: Prasan Kumar, Technical Solutions Developer at Redis
Author: Will Johnston, Developer Growth Manager at Redis

Welcome to the getting started guide for the official Redis Developer Hub!

If you are new to Redis, we recommend starting with Redis University (RU101). RU101 is an introductory course, perfect for developers new to Redis. In this course, you’ll learn about the data structures in Redis, and you’ll see how to practically apply them in the real world.

If you have questions related to Redis, come join the Redis Discord server. Our Discord server is a place where you can learn, share, and collaborate about anything and everything Redis. Connect with users from the community and Redis University. Get your questions answered and learn cool new tips and tricks! Watch for notifications of the latest content from Redis and the community. And share your own content with the community.

Setup Redis

There are essentially two ways you can use Redis:

  • Cloud Redis: A hosted and serverless Redis database-as-a-service (DBaaS). The fastest way to deploy Redis Enterprise via Amazon AWS, Google Cloud Platform, or Microsoft Azure.
  • On-prem/local Redis: Self-managed Redis using your own server and any operating system (Mac OS, Windows, or Linux).

If you choose to use local Redis, we strongly recommend using Docker. If you choose not to use Docker, use the following instructions based on your OS:

The docker run command below exposes redis-server on port 6379 and RedisInsight on port 8001. You can use RedisInsight by pointing your browser to http://localhost:8001.

# install
$ docker run -d --name redis-stack -p 6379:6379 -p 8001:8001 redis/redis-stack:latest

You can use redis-cli to connect to the server at localhost:6379. If you don’t have redis-cli installed locally, you can run it from the Docker container like below:

# connect
$ docker exec -it redis-stack redis-cli

Basic Querying with Redis

  • Connect to Redis using CLI or RedisInsight (a GUI tool to visualize data & run commands)

RedisInsight

# syntax 1 : connect using host & port, followed by password
$ redis-cli -h host -p port
> AUTH password
OK

# example 1
$ redis-cli -h redis15.localnet.org -p 6390
> AUTH myUnguessablePassword
OK

# syntax 2 : connect using uri
$ redis-cli -u redis://user:password@host:port/dbnum

# example 2
$ redis-cli -u redis://LJenkins:p%40ssw0rd@redis-16379.hosted.com:16379/0

  • Basic CLI / RedisInsight workbench commands
# syntax : Check specific keys
> KEYS pattern

# example
> KEYS *

#------------
# syntax : Check number of keys in database
> DBSIZE

#------------
# syntax : set a key value
> SET key value EX expirySeconds

# example
> SET company redis EX 60

#------------
# syntax : get value by key
> GET key

# example
> GET company

#------------
# syntax : delete keys
> DEL key1 key2 key3 ... keyN

# example
> DEL company

#------------
# syntax : Check if key exists
> EXISTS key1

# example
> EXISTS company

#------------
# syntax : set expiry to key
> EXPIRE key seconds

# example
> EXPIRE lastname 60

#------------
# syntax : remove expiry from key
> PERSIST key

# example
> PERSIST lastname

#------------
# syntax : find (remaining) time to live of a key
> TTL key

# example
> TTL lastname

#------------
# syntax : increment a number
> INCR key

# example
> INCR counter

#------------
# syntax : decrement a number
> DECR key

# example
> DECR counter

Detailed CLI instructions can be viewed here and commands can be checked here

Secondary Indexing and Searching with Redis

Redis Stack enables the JSON data type in Redis.

# syntax : set an object value to a key
> JSON.SET objKey $ value

# example
> JSON.SET person $ '{"name":"Leonard Cohen","dob":1478476800,"isActive": true, "hobbies":["music", "cricket"]}'

#------------
# syntax : get object value of a key
> JSON.GET objKey $

# example
> JSON.GET person $

#------------
# syntax : find object key length
> JSON.OBJLEN objKey $

# example
> JSON.OBJLEN person $

#------------
# syntax : find object keys
> JSON.OBJKEYS objKey $

# example
> JSON.OBJKEYS person $

#------------
# syntax : update nested property
> JSON.SET objKey $.prop value

# example
> JSON.SET person $.name '"Alex"'

#------------
# syntax : update nested array
> JSON.SET objKey $.arrayProp fullValue
> JSON.SET objKey $.arrayProp[index] value

# example
> JSON.SET person $.hobbies '["music", "cricket"]'
> JSON.SET person $.hobbies[1] '"dance"'

#------------
# syntax : remove nested array item by index
> JSON.ARRPOP objKey $.arrayProp index

# example
> JSON.ARRPOP person $.hobbies 1

More details can be found in the Redis Stack docs


Redis Stack enables a query and indexing engine for Redis, providing secondary indexing, full-text search and aggregations capabilities.

  • We have to create an index on a schema to be able to search its data
# syntax
> FT.CREATE {index_name} ON JSON PREFIX {count} {prefix} SCHEMA {json_path} AS {attribute} {type}
# NOTE: attribute = logical name, json_path = JSONPath expressions

# example
> FT.CREATE userIdx ON JSON PREFIX 1 users: SCHEMA $.user.name AS name TEXT $.user.hobbies AS hobbies TAG $.user.age as age NUMERIC
# NOTE: You can search by any attribute mentioned in the above index for keys that start with users: (e.g. users:1).
  • More details on Indexing JSON can be found here

Once the index is created, any pre-existing, new, or modified JSON document is automatically indexed.

//sample json document
{
"user": {
"name": "John Smith",
"hobbies": "foo,bar",
"age": 23
}
}
# adding JSON document
> JSON.SET myDoc $ '{"user":{"name":"John Smith","hobbies":"foo,bar","age":23}}'
  • Search
# search all user documents with name 'John'
> FT.SEARCH userIdx '@name:(John)'
1) (integer) 1
2) "myDoc"
3) 1) "$"
2) {"user":{"name":"John Smith","hobbies":"foo,bar","age":23}}"
  • Search & project required fields
# search documents with name 'John' & project only age field
> FT.SEARCH userIdx '@name:(John)' RETURN 1 $.user.age
1) (integer) 1
2) "myDoc"
3) 1) "$.user.age"
2) "23"
# project multiple fields
> FT.SEARCH userIdx '@name:(John)' RETURN 2 $.user.age $.user.name
1) (integer) 1
2) "myDoc"
3) 1) "$.user.age"
2) "23"
3) "$.user.name"
4) "John Smith"

#------------
# project with alias name
> FT.SEARCH userIdx '@name:(John)' RETURN 3 $.user.age AS userAge

1) (integer) 1
2) "myDoc"
3) 1) "userAge"
2) "23"
#------------

# multi field query
> FT.SEARCH userIdx '@name:(John) @hobbies:{foo | me} @age:[20 30]'
1) (integer) 1
2) "myDoc"
3) 1) "$"
2) {"user":{"name":"John Smith","hobbies":"foo,bar","age":23}}"

More details on query syntax

  • Drop index
> FT.DROPINDEX userIdx

Useful Resources

  1. Redis and JSON explained (Revisited in 2022) video
  2. Searching with Redis Stack
  3. Redis University 204, Storing, Querying, and Indexing JSON at Speed

Sync Redis with Other Databases

RedisGears adds a dynamic execution framework for your Redis data that enables you to write and execute functions that implement data flows in Redis.

Consider the following example to sync data with MongoDB.

  • Create the Python file below and update the MongoDB connection details, database, collection, and primary key name to be synced
write-behind.py
# Gears Recipe for a single write behind

# import redis gears & mongo db libs
from rgsync import RGJSONWriteBehind, RGJSONWriteThrough
from rgsync.Connectors import MongoConnector, MongoConnection

# change mongodb connection
connection = MongoConnection("", "", "", "", "ENV_MONGODB_CONNECTION_URL")

# change MongoDB database
db = 'ENV_DB_NAME'

# change MongoDB collection & it's primary key
collection1Connector = MongoConnector(connection, db, 'ENV_COLLECTION1_NAME', 'ENV_COLLECTION1_PRIMARY_KEY')

# change redis keys with prefix that must be synced with mongodb collection
RGJSONWriteBehind(GB, keysPrefix='ENV_COLLECTION1_PREFIX_KEY',
                  connector=collection1Connector, name='Collection1WriteBehind',
                  version='99.99.99')

Example environment values:

ENV_MONGODB_CONNECTION_URL=mongodb://usrAdmin:passwordAdmin@10.10.20.2:27017/dbSpeedMernDemo?authSource=admin
ENV_DB_NAME=dbSpeedMernDemo
ENV_COLLECTION1_NAME=movies
ENV_COLLECTION1_PRIMARY_KEY=movieId
ENV_COLLECTION1_PREFIX_KEY=movie

The code above demonstrates how you would sync a "movies" collection in MongoDB with Redis using the "movie" key prefix.

To get this working, you first need to load the Python file into redis-server:

$ redis-cli rg.pyexecute "`cat write-behind.py`" REQUIREMENTS rgsync pymongo==3.12.0

Now, insert a JSON item into Redis starting with the prefix specified in the Python file (i.e. "movie"):

# redis-cli command
> JSON.SET movie:123 $ '{"movieId":123,"name":"RRR","isActive": true}'

Now, verify whether the JSON is inserted into MongoDB.
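
For example, you could check with mongosh, using the same connection details configured above:

> mongosh "mongodb://usrAdmin:passwordAdmin@10.10.20.2:27017/dbSpeedMernDemo?authSource=admin"
> db.movies.findOne({ movieId: 123 })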

Additional Resources For Syncing with Redis and Other Databases

  1. Redis gear sync with MongoDB
  2. RG.PYEXECUTE
  3. rgsync
  4. gears-cli

Probabilistic Data and Queries with Redis

Redis Stack supports probabilistic datatypes and queries. Below you will find a stock leaderboard example:

# Reserve a new leaderboard filter
> TOPK.RESERVE trending-stocks 12 50 4 0.9
"OK"

# Add new entries to the leaderboard
> TOPK.ADD trending-stocks AAPL AMD MSFT INTC GOOG FB NFLX GME AMC TSLA
1) "null" ...

# Get the leaderboard
> TOPK.LIST trending-stocks
1) "AAPL"
2) "AMD"
2) "MSFT" ...

# Get information about the leaderboard
> TOPK.INFO trending-stocks
1) "k"
2) "12"
3) "width"
4) "50"
5) "depth"
6) "4"
7) "decay"
8) "0.90000000000000002"

More details in docs

TimeSeries Data and Queries with Redis

Redis Stack supports time-series use cases such as IoT, stock prices, and telemetry. You can ingest and query millions of samples and events at the speed of Redis. You can also use a variety of queries for visualization and monitoring with built-in connectors to popular tools like Grafana, Prometheus, and Telegraf.

The following example demonstrates how you might store temperature sensor readings in Redis Stack:

# Create new time-series, for example temperature readings
> TS.CREATE temperature:raw DUPLICATE_POLICY LAST
"OK"

# Create a bucket for monthly aggregation
> TS.CREATE temperature:monthly DUPLICATE_POLICY LAST
"OK"

# Automatically aggregate based on time-weighted average
> TS.CREATERULE temperature:raw temperature:monthly AGGREGATION twa 2629800000
"OK"

# Add data to the raw time-series
> TS.MADD temperature:raw 1621666800000 52 ...
1) "1621666800000" ...

# View the monthly time-weighted average temperatures
> TS.RANGE temperature:monthly 0 +
1) 1) "1621666800000"
2) "52" ...

# Delete compaction rule
> TS.DELETERULE temperature:raw temperature:monthly
"OK"

# Delete partial time-series
> TS.DEL temperature:raw 0 1621666800000
(integer) 1

More details in docs

Additional Resources

- + \ No newline at end of file diff --git a/howtos/ratelimiting/index.html b/howtos/ratelimiting/index.html index 1f2b48fd2b..e658f2f755 100644 --- a/howtos/ratelimiting/index.html +++ b/howtos/ratelimiting/index.html @@ -4,7 +4,7 @@ How to build a Rate Limiter using Redis | The Home of Redis Developers - + @@ -15,7 +15,7 @@ The application will return after each request the following headers. That will let the user know how many requests they have remaining before the run over the limit. On the 10th run server should return an HTTP status code of 429 Too Many Requests

SETNX is short for "SET if Not eXists". It sets the key to hold a string value only if the key does not exist; in that case, it is equal to SET. When the key already holds a value, no operation is performed. For each new client, a key combining the IP address and route is created, as shown below:

 SETNX your_ip:PING limit_amount
Example: SETNX 127.0.0.1:PING 10

More information

Set a timeout on key:

 EXPIRE your_ip:PING timeout
Example: EXPIRE 127.0.0.1:PING 1000

More information

How the data is accessed:

Subsequent requests read the bucket to see how many requests remain:

 GET your_ip:PING
Example: GET 127.0.0.1:PING

More information

Each accepted request then decrements the bucket:

 DECRBY your_ip:PING amount
Example: DECRBY 127.0.0.1:PING 1

More information
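
Putting these commands together, a minimal Python sketch of this pattern might look as follows (using redis-py; the key and route names mirror the examples above, and this is not the sample application's exact code):

 import redis

 r = redis.Redis()

 def allow_request(ip, route="PING", limit=10, window_seconds=1000):
     key = f"{ip}:{route}"
     # SETNX creates the bucket only for the first request in a window
     if r.setnx(key, limit):
         r.expire(key, window_seconds)  # EXPIRE starts the window
     remaining = int(r.get(key) or 0)   # GET reads the bucket
     if remaining <= 0:
         return False                   # caller should answer 429 Too Many Requests
     r.decrby(key, 1)                   # DECRBY consumes one request
     return True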

References

Redis Launchpad
- + \ No newline at end of file diff --git a/howtos/redisai/bert-qa-benchmarking-with-redisai-and-redisgears/index.html b/howtos/redisai/bert-qa-benchmarking-with-redisai-and-redisgears/index.html index 97f7a66a1a..05eca07588 100644 --- a/howtos/redisai/bert-qa-benchmarking-with-redisai-and-redisgears/index.html +++ b/howtos/redisai/bert-qa-benchmarking-with-redisai-and-redisgears/index.html @@ -4,7 +4,7 @@ Benchmarks for BERT Large Question Answering inference for RedisAI and RedisGears | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Benchmarks for BERT Large Question Answering inference for RedisAI and RedisGears


Author: Alex Mikhalev, AI/ML Architect at Nationwide Building Society
Author: Will Johnston, Developer Growth Manager at Redis

Introduction

In this article, we will explore the challenges and opportunities associated with deploying large BERT Question Answering Transformer models from Hugging Face, using RedisGears and RedisAI to perform a lot of the heavy lifting while also leveraging the in-memory datastore Redis.

Why do we need RedisAI?

In data science workloads:

  • You want to load high-performance hardware as close to 100% as possible
  • You prefer to re-calculate results

However, in a client-facing application:

  • You want to be able to distribute the load evenly, so it never reaches 100%, and client-facing servers can perform additional functions
  • You prefer to cache results of previous calculations and fetch data from the cache as fast as possible to provide a seamless customer experience

So before we go any further, why should you read this article? Here are some numbers for inspiration:

First:

python3 transformers_plain_bert_qa.py
airborne transmission of respiratory infections is the lack of established methods for the detection of airborne respiratory microorganisms
10.351818372 seconds

The above script uses a slightly modified transformer from the default pipeline for BERT QA, and running it on the server takes 10 seconds. The server uses the latest 12th Gen Intel(R) Core(TM) i9-12900K (full cpuinfo).

However:

time curl -i -H "Content-Type: application/json" -X POST -d '{"search":"Who performs viral transmission among adults"}' http://localhost:8080/qasearch

real 0m0.747s
user 0m0.004s
sys 0m0.000s

The script runs BERT QA inference on each shard - by default there are as many shards as available CPUs - and returns answers in under one second.

Incredible, right? Let's dive in!

Background

BERT Question Answering inference works by having the ML model select an answer from a given text. In other words, BERT QA "thinks" through the following: "What is the answer from the text, assuming the answer to the question exists within the selected paragraph?"

So it's important to select text potentially containing an answer. A typical pattern is to use Wikipedia data to build Open Domain Question Answering.

Our QA system is a medical domain-specific question answering pipeline, hence we first need a pipeline that turns data into a knowledge graph. This NLP pipeline is available at Redis LaunchPad, is fully open source, and is described in a previous article. Here is a 5 minute video describing it, and below you will find an architectural overview:

(Architectural overview diagram)

BERT Question Answering pipeline and API

In the BERT QA pipeline (or in any other modern NLP inference task), there are two steps:

  1. Tokenize text - turn text into numbers
  2. Run the inference - large matrix multiplication

With Redis, we have the opportunity to pre-compute everything and store it in memory, but how do we do it? Unlike with the summarization ML task, the question is not known in advance, so we can't pre-compute all possible answers. However, we can pre-tokenize all potential answers (i.e. all paragraphs in the dataset) using RedisGears:

def parse_sentence(record):
    import redisAI
    import numpy as np
    global tokenizer
    if not tokenizer:
        tokenizer = loadTokeniser()
    hash_tag = "{%s}" % hashtag()

    for idx, value in sorted(record['value'].items(), key=lambda item: int(item[0])):
        # Tokenize the paragraph and append the separator token
        tokens = tokenizer.encode(value, add_special_tokens=False, max_length=511, truncation=True, return_tensors="np")
        tokens = np.append(tokens, tokenizer.sep_token_id).astype(np.int64)
        tensor = redisAI.createTensorFromBlob('INT64', tokens.shape, tokens.tobytes())

        key_prefix = 'sentence:'
        sentence_key = remove_prefix(record['key'], key_prefix)
        token_key = f"tokenized:bert:qa:{sentence_key}:{idx}"
        # Store the pre-tokenized tensor and track it in a per-shard set
        redisAI.setTensorInKey(token_key, tensor)
        execute('SADD', f'processed_docs_stage3_tokenized{hash_tag}', token_key)

See the full code on GitHub.

Then for each Redis Cluster shard, we pre-load the BERT QA model by downloading, exporting it into torchscript, then loading it into each shard:

def load_bert():
    model_file = 'traced_bert_qa.pt'

    with open(model_file, 'rb') as f:
        model = f.read()

    startup_nodes = [{"host": "127.0.0.1", "port": "30001"},
                     {"host": "127.0.0.1", "port": "30002"},
                     {"host": "127.0.0.1", "port": "30003"}]
    cc = ClusterClient(startup_nodes=startup_nodes)
    # Ask RedisGears for the hash tag of each shard, then load the model per shard
    hash_tags = cc.execute_command("RG.PYEXECUTE", "gb = GB('ShardsIDReader').map(lambda x: hashtag()).run()")[0]
    print(hash_tags)
    for hash_tag in hash_tags:
        print("Loading model bert-qa{%s}" % hash_tag.decode('utf-8'))
        cc.modelset('bert-qa{%s}' % hash_tag.decode('utf-8'), 'TORCH', 'CPU', model)
        print(cc.infoget('bert-qa{%s}' % hash_tag.decode('utf-8')))

The full code is available on GitHub.

And when a question comes from the user, we tokenize and append the question to the list of potential answers before running the RedisAI model:

    token_key = f"tokenized:bert:qa:{sentence_key}"
# encode question
input_ids_question = tokenizer.encode(question, add_special_tokens=True, truncation=True, return_tensors="np")
t=redisAI.getTensorFromKey(token_key)
input_ids_context=to_np(t,np.int64)
# merge (append) with potential answer, context - is pre-tokenized paragraph
input_ids = np.append(input_ids_question,input_ids_context)
attention_mask = np.array([[1]*len(input_ids)])
input_idss=np.array([input_ids])
num_seg_a=input_ids_question.shape[1]
num_seg_b=input_ids_context.shape[0]
token_type_ids = np.array([0]*num_seg_a + [1]*num_seg_b)
# create actual model runner for RedisAI
modelRunner = redisAI.createModelRunner(f'bert-qa{hash_tag}')
# make sure all types are correct
input_idss_ts=redisAI.createTensorFromBlob('INT64', input_idss.shape, input_idss.tobytes())
attention_mask_ts=redisAI.createTensorFromBlob('INT64', attention_mask.shape, attention_mask.tobytes())
token_type_ids_ts=redisAI.createTensorFromBlob('INT64', token_type_ids.shape, token_type_ids.tobytes())
redisAI.modelRunnerAddInput(modelRunner, 'input_ids', input_idss_ts)
redisAI.modelRunnerAddInput(modelRunner, 'attention_mask', attention_mask_ts)
redisAI.modelRunnerAddInput(modelRunner, 'token_type_ids', token_type_ids_ts)
redisAI.modelRunnerAddOutput(modelRunner, 'answer_start_scores')
redisAI.modelRunnerAddOutput(modelRunner, 'answer_end_scores')
# run RedisAI model runner
res = await redisAI.modelRunnerRunAsync(modelRunner)
answer_start_scores=to_np(res[0],np.float32)
answer_end_scores = to_np(res[1],np.float32)
answer_start = np.argmax(answer_start_scores)
answer_end = np.argmax(answer_end_scores) + 1
answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end],skip_special_tokens = True))
log("Answer "+str(answer))
return answer

Check out the full code, available on GitHub.

The process for making a BERT QA API call looks like this:

Architecture Diagram for BERT QA RedisGears and RedisAI

Here I use two cool features of RedisGears: capturing events on key miss, and using async/await to run RedisAI on each shard without locking the main thread, so that the Redis Cluster can continue to serve other customers. For the benchmark, caching responses from RedisAI is disabled. If you are getting response times in nanoseconds on the second call rather than milliseconds, check to make sure the line linked above is commented out.
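For reference, a heavily simplified sketch of such a registration looks like this (assumptions: RedisGears 1.2+ with coroutine support, key-miss keyspace notifications enabled on the server, and run_qa standing in for the inference coroutine shown above; see GitHub for the real code):

# Simplified sketch, not the full production code.
# Assumes notify-keyspace-events includes key-miss events ("Km").
async def on_cache_miss(record):
    # Missed keys look like: bertqa{<shard>}_<sentence key>_<question>
    answer = await run_qa(record['key'])  # non-blocking RedisAI inference
    # execute('SET', record['key'], answer)  # response caching: disabled for the benchmark
    return answer

gb = GB('KeysReader')
gb.map(on_cache_miss)
# Fire only on key misses, on the local shard, without trying to read the absent value.
gb.register(prefix='bertqa*', eventTypes=['keymiss'], readValue=False, mode='async_local')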

Running the Benchmark

Pre-requisites for running the benchmark:

Assuming you are running Debian or Ubuntu and have Docker and docker-compose installed (or can create a virtual environment via conda), run the following commands:

git clone --recurse-submodules https://github.com/applied-knowledge-systems/the-pattern.git
cd the-pattern
./bootstrap_benchmark.sh

The above commands should end with a curl call to the qasearch API, since Redis caching is disabled for the benchmark.

Next, invoke curl like this:

time curl -i -H "Content-Type: application/json" -X POST -d '{"search":"Who performs viral transmission among adults"}' http://localhost:8080/qasearch

Expect the following output, or something similar based on your runtime environment:

HTTP/1.1 200 OK
Server: nginx/1.18.0 (Ubuntu)
Date: Sun, 29 May 2022 12:05:39 GMT
Content-Type: application/json
Content-Length: 2120
Connection: keep-alive

{"links":[{"created_at":"2002","rank":13,"source":"C0001486","target":"C0152083"}],"results":[{"answer":"adenovirus","sentence":"The medium of 40 T150 flasks of adenovirus transducer dec CAR CHO cells yielded 0 5 1 my of purified msCEACAM1a 1 4 protein","sentencekey":"sentence:PMC125375.xml:{mG}:202","title":"Crystal structure of murine sCEACAM1a[1,4]: a coronavirus receptor in the CEA family"}] OUTPUT_REDUCTED}

I modified the output of the API for the benchmark to return results from all shards, even if the answer is empty. In the run above, five shards return answers. The overall API call takes less than one second, including all the additional hops to search RedisGraph!

Architecture Diagram for BERT QA API call

Deep Dive into the Benchmark

Let's dig deeper into what's happening under the hood:

You should have a sentence key with a shard id, which you can find by looking for the "Cache key" entry in the output of docker logs -f rgcluster. In my setup the cache key is "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults". If you think it looks like a function call, that's because it is one: the function is triggered whenever the key isn't present in the Redis Cluster, which for the benchmark is every time, since, as you may remember, we disabled caching of the output.

One more thing to figure out from the logs is the port of the shard corresponding to the hash tag, also known as the shard id. It is the text found between the curly brackets; it looks like {6fd} above. The same value appears in the output of the export_load script. In my case the cache key was found in "30012.log", so my port is 30012.
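To make the key anatomy explicit, here is a small illustrative parse in Python (the key is the one from my logs above):

# Illustrative only: the cache key encodes shard hash tag, sentence key and question.
key = "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults"
shard_tag = key[key.index('{') + 1:key.index('}')]
_, sentence_key, question = key.split('_', 2)
print(shard_tag)     # 6fd
print(sentence_key)  # PMC169038.xml:{6fd}:33
print(question)      # Who performs viral transmission among adults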

Next I run the following command:

redis-cli -c -p 30012 -h 127.0.0.1 get "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults"

and then run the benchmark:

redis-benchmark -p 30012 -h 127.0.0.1 -n 10 get "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults"
====== get bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults ======
10 requests completed in 0.04 seconds
50 parallel clients
3 bytes payload
keep alive: 1

10.00% <= 41 milliseconds
100.00% <= 41 milliseconds
238.10 requests per second

If you are wondering, -n is the number of requests; in this case we run the benchmark 10 times. You can also add:

--csv if you want the output in CSV format

--precision 3 if you want more decimal places in the millisecond figures

More information about the benchmarking tool can be found on the redis.io Benchmarks page.

If you don't have the Redis utilities installed locally, you can use Docker as follows:

docker exec -it rgcluster /bin/bash
redis-benchmark -p 30012 -h 127.0.0.1 -n 10 get "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults"
====== get bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults ======
10 requests completed in 1.75 seconds
50 parallel clients
99 bytes payload
keep alive: 1
host configuration "save":
host configuration "appendonly": no
multi-thread: no

Latency by percentile distribution:
0.000% <= 243.711 milliseconds (cumulative count 1)
50.000% <= 987.135 milliseconds (cumulative count 5)
75.000% <= 1577.983 milliseconds (cumulative count 8)
87.500% <= 1662.975 milliseconds (cumulative count 9)
93.750% <= 1744.895 milliseconds (cumulative count 10)
100.000% <= 1744.895 milliseconds (cumulative count 10)

Cumulative distribution of latencies:
0.000% <= 0.103 milliseconds (cumulative count 0)
10.000% <= 244.223 milliseconds (cumulative count 1)
20.000% <= 409.343 milliseconds (cumulative count 2)
30.000% <= 575.487 milliseconds (cumulative count 3)
40.000% <= 821.247 milliseconds (cumulative count 4)
50.000% <= 987.135 milliseconds (cumulative count 5)
60.000% <= 1157.119 milliseconds (cumulative count 6)
70.000% <= 1497.087 milliseconds (cumulative count 7)
80.000% <= 1577.983 milliseconds (cumulative count 8)
90.000% <= 1662.975 milliseconds (cumulative count 9)
100.000% <= 1744.895 milliseconds (cumulative count 10)

Summary:
throughput summary: 5.73 requests per second
latency summary (msec):
avg min p50 p95 p99 max
1067.296 243.584 987.135 1744.895 1744.895 1744.895

The platform only has 20 articles and 8 Redis nodes (4 masters + 4 replicas), so the relevance of the answers will be limited, and the deployment doesn't need much memory.

AI.INFO

Now let's check how long our RedisAI model runs on the {6fd} shard:

127.0.0.1:30012> AI.INFO bert-qa{6fd}
1) "key"
2) "bert-qa{6fd}"
3) "type"
4) "MODEL"
5) "backend"
6) "TORCH"
7) "device"
8) "CPU"
9) "tag"
10) ""
11) "duration"
12) (integer) 8928136
13) "samples"
14) (integer) 58
15) "calls"
16) (integer) 58
17) "errors"
18) (integer) 0

bert-qa{6fd} is the key under which the actual (very large) model is saved. The AI.INFO command gives us a cumulative duration of 8928136 microseconds over 58 calls, which is approximately 154 milliseconds per call.
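As a quick sanity check of that arithmetic:

# Average model runtime derived from the AI.INFO counters above.
duration_us, calls = 8928136, 58
print(f"{duration_us / calls / 1000:.1f} ms per call")  # -> 153.9 ms per call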

Let's double-check that by resetting the stats and then re-running the benchmark.

First, reset the stats:

127.0.0.1:30012> AI.INFO bert-qa{6fd} RESETSTAT
OK
127.0.0.1:30012> AI.INFO bert-qa{6fd}
1) "key"
2) "bert-qa{6fd}"
3) "type"
4) "MODEL"
5) "backend"
6) "TORCH"
7) "device"
8) "CPU"
9) "tag"
10) ""
11) "duration"
12) (integer) 0
13) "samples"
14) (integer) 0
15) "calls"
16) (integer) 0
17) "errors"
18) (integer) 0

Then, re-run the benchmark:

redis-benchmark -p 30012 -h 127.0.0.1 -n 10 get "bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults"
====== get bertqa{6fd}_PMC169038.xml:{6fd}:33_Who performs viral transmission among adults ======
10 requests completed in 1.78 seconds
50 parallel clients
99 bytes payload
keep alive: 1
host configuration "save":
host configuration "appendonly": no
multi-thread: no

Latency by percentile distribution:
0.000% <= 188.927 milliseconds (cumulative count 1)
50.000% <= 995.839 milliseconds (cumulative count 5)
75.000% <= 1606.655 milliseconds (cumulative count 8)
87.500% <= 1692.671 milliseconds (cumulative count 9)
93.750% <= 1779.711 milliseconds (cumulative count 10)
100.000% <= 1779.711 milliseconds (cumulative count 10)

Cumulative distribution of latencies:
0.000% <= 0.103 milliseconds (cumulative count 0)
10.000% <= 189.183 milliseconds (cumulative count 1)
20.000% <= 392.191 milliseconds (cumulative count 2)
30.000% <= 540.159 milliseconds (cumulative count 3)
40.000% <= 896.511 milliseconds (cumulative count 4)
50.000% <= 996.351 milliseconds (cumulative count 5)
60.000% <= 1260.543 milliseconds (cumulative count 6)
70.000% <= 1456.127 milliseconds (cumulative count 7)
80.000% <= 1606.655 milliseconds (cumulative count 8)
90.000% <= 1692.671 milliseconds (cumulative count 9)
100.000% <= 1779.711 milliseconds (cumulative count 10)

Summary:
throughput summary: 5.62 requests per second
latency summary (msec):
avg min p50 p95 p99 max
1080.454 188.800 995.839 1779.711 1779.711 1779.711

Now check the stats again:

AI.INFO bert-qa{6fd}
1) "key"
2) "bert-qa{6fd}"
3) "type"
4) "MODEL"
5) "backend"
6) "TORCH"
7) "device"
8) "CPU"
9) "tag"
10) ""
11) "duration"
12) (integer) 1767749
13) "samples"
14) (integer) 20
15) "calls"
16) (integer) 20
17) "errors"
18) (integer) 0

Now we get 88387.45 microseconds per call (1767749 / 20), which is pretty fast! Also, considering we started at 10 seconds per call, I think the benefits of using RedisAI in combination with RedisGears are pretty obvious. However, the trade-off is high memory usage.

There are many ways to optimize this deployment. For example, you can add FP16 quantization and the ONNX runtime. If you would like to try that, this script will be a good starting point.
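As a rough sketch of what that could look like (a hedged example with assumed package names and file paths; the linked script is the authoritative version):

# Hedged sketch only; assumes torch, onnx and onnxconverter-common are installed.
import torch
import onnx
from onnxconverter_common import float16

model = torch.jit.load('traced_bert_qa.pt').eval()
dummy = torch.ones(1, 512, dtype=torch.int64)  # stands in for input_ids/attention_mask/token_type_ids
torch.onnx.export(
    model, (dummy, dummy, dummy), 'bert_qa.onnx',
    input_names=['input_ids', 'attention_mask', 'token_type_ids'],
    output_names=['answer_start_scores', 'answer_end_scores'],
    opset_version=13)

# Convert the exported weights to FP16, roughly halving the model's memory footprint.
fp16_model = float16.convert_float_to_float16(onnx.load('bert_qa.onnx'))
onnx.save(fp16_model, 'bert_qa_fp16.onnx')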

Using Grafana to monitor RedisGears throughput, CPU, and Memory usage

Thanks to the contribution of Mikhail Volkov, we can now observe RedisGears and RedisGraph throughput and memory consumption using Grafana. When you cloned the repository, it also started a Grafana Docker container with pre-built templates for monitoring the Redis Cluster (including RedisGears and RedisAI) and the Graph node, which is Redis with RedisGraph. "The Pattern" dashboard provides an overview, with all the key benchmark metrics you care about:

Grafana for RedisGraph

Grafana for RedisCluster

- + \ No newline at end of file diff --git a/howtos/redisgraph/csvtograph/index.html b/howtos/redisgraph/csvtograph/index.html index 4299fa03d3..a9121b5c85 100644 --- a/howtos/redisgraph/csvtograph/index.html +++ b/howtos/redisgraph/csvtograph/index.html @@ -4,7 +4,7 @@ How to build RedisGraph databases from CSV inputs in Easy Steps | The Home of Redis Developers - + @@ -13,7 +13,7 @@

How to build RedisGraph databases from CSV inputs in Easy Steps

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the fastest graph database that processes complex graph operations in real time, 10x – 600x faster than any other graph database. It shows how your data is connected through multiple visualization integrations including RedisInsight, Linkurious, and Graphileon. It allows you to query graphs using the industry-standard Cypher query language and you can easily use graph capabilities from application code.

My Image

RedisGraph Bulk Loader

If you have a bunch of CSV files that you want to load into a RedisGraph database, try this Bulk Loader utility. Aptly named RedisGraph Bulk Loader, the tool is written in Python and helps you build RedisGraph databases from CSV inputs. It requires a Python 3 interpreter.

Follow the steps below to load CSV data into RedisGraph database:

Step 1. Run Redis Stack Docker container

 docker run -p 6379:6379 --name redis-stack redis/redis-stack

Step 2. Verify if RedisGraph module is loaded

 info modules
# Modules
module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

Step 3. Clone the Bulk Loader Utility

 $ git clone https://github.com/RedisGraph/redisgraph-bulk-loader

Step 4. Installing the RedisGraph Bulk Loader tool

The bulk loader can be installed using pip:

  pip3 install redisgraph-bulk-loader

Or

 pip3 install git+https://github.com/RedisGraph/redisgraph-bulk-loader.git@master

Step 5. Create a Python virtual env for this work

 python3 -m venv redisgraphloader

Step 6. Step into the venv:

 source redisgraphloader/bin/activate

Step 7. Install the dependencies for the bulk loader:

 pip3 install -r requirements.txt

If the above command doesn’t work, install the below modules:

 pip3 install pathos
pip3 install redis
pip3 install click

Step 8. Generate the sample CSV files

Install Groovy, then run the generator script:

 groovy generateCommerceGraphCSVForImport.groovy

Step 9. Verify the .csv files created

 head -n2 *.csv
==> addtocart.csv <==
src_person,dst_product,timestamp
0,1156,2010-07-20T16:11:20.551748

==> contain.csv <==
src_person,dst_order
2000,1215

==> order.csv <==
_internalid,id,subTotal,tax,shipping,total
2000,0,904.71,86.40,81.90,1073.01

==> person.csv <==
_internalid,id,name,address,age,memberSince
0,0,Cherlyn Corkery,146 Kuphal Isle South Jarvis MS 74838-0662,16,2010-03-18T16:25:20.551748

==> product.csv <==
_internalid,id,name,manufacturer,msrp
1000,0,Sleek Plastic Car,Thiel Hills and Leannon,385.62

==> transact.csv <==
src_person,dst_order
2,2000

==> view.csv <==
src_person,dst_product,timestamp
0,1152,2012-04-14T11:23:20.551748

Step 10. Run the Bulk loader script

  python3 bulk_insert.py prodrec-bulk -n person.csv -n product.csv -n order.csv -r view.csv -r addtocart.csv -r transact.csv -r contain.csv
person [####################################] 100%
1000 nodes created with label 'person'
product [####################################] 100%
1000 nodes created with label 'product'
order [####################################] 100%
811 nodes created with label 'order'
view [####################################] 100%
24370 relations created for type 'view'
addtocart [####################################] 100%
6458 relations created for type 'addtocart'
transact [####################################] 100%
811 relations created for type 'transact'
contain [####################################] 100%
1047 relations created for type 'contain'
Construction of graph 'prodrec-bulk' complete: 2811 nodes created, 32686 relations created in 1.021761 seconds
 graph.query prodrec "match (p:person) where p.id=200 return p.name"
1) 1) "p.name"
2) (empty array)
3) 1) "Cached execution: 0"
2) "Query internal execution time: 0.518300 milliseconds"

Step 11. Install RedisInsight

To use RedisInsight on a local Mac, you can download it from the RedisInsight page on the RedisLabs website:

Click this link to access a form that allows you to select the operating system of your choice.

My Image

If you have Docker Engine installed on your system, the quickest way is to run the following command:

 docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Step 12. Accessing RedisInsight

Next, point your browser to http://localhost:8001.

Step 13. Run the Graph Query

 GRAPH.QUERY "prodrec-bulk" "match (p:person) where p.id=199 return p"

My Image

References

  • Learn more about RedisGraph in the Quickstart tutorial.

Redis Launchpad
- + \ No newline at end of file diff --git a/howtos/redisgraph/explore-python-code/index.html b/howtos/redisgraph/explore-python-code/index.html index 55301b143a..8f1dc8400a 100644 --- a/howtos/redisgraph/explore-python-code/index.html +++ b/howtos/redisgraph/explore-python-code/index.html @@ -4,7 +4,7 @@ Explore Python Codebase using RedisGraph | The Home of Redis Developers - + @@ -13,7 +13,7 @@

Explore Python Codebase using RedisGraph

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

Pycograph is an open source tool that creates a RedisGraph model of your Python code. The tool lets you explore your Python codebase with graph queries: you can query the Python code with Cypher and visualize the graph model using RedisInsight.

The project is hosted at https://pycograph.com/ and the package is available in the PyPI repository. It was introduced for the first time by Reka Horvath during RedisConf 2021.

Let us see how to explore Python code using Pycograph and RedisGraph below:

Step 1. Install Docker

 curl -sSL https://get.docker.com/ | sh

Step 2. Install Pycograph from PyPI

 pip install pycograph

Step 3. Start RedisGraph Module

The redis/redis-stack Docker image provides you with all the essential Redis modules.

 docker run -d -p 6379:6379 redis/redis-stack

Step 4. Run RedisInsight

 docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Step 5. Load a sample Python code

We will be using a popular Docker Compose project for our sample Python code. Clone the Docker Compose repository:

  git clone https://github.com/docker/compose

Step 6. Load Python Code

Load your project's code with the pycograph load command:

 pycograph load --project-dir compose
Results:
 Graph successfully updated.
{'graph name': 'compose', 'nodes added': 2428, 'edges added': 11239}

Step 7. Visualize the project

Open RedisInsight, select RedisGraph on the left menu and run the below query:

Query #1: Return every node

 MATCH (n) RETURN n

You will see the below output:

My Image

Query #2: Return every non-test object

My Image

Query #3. Displaying the function behind the docker-compose up command

A query returning exactly one node using the unique full_name property. Double-click on the node to display all its relationships.

My Image

Query #4. Displaying the 'docker-compose up' and its calls relationships

My Image

Query #5. Displaying the 'docker-compose up' vs 'docker-compose run'

Functions called by the Docker Compose top level commands up and run

My Image

References:

Redis Launchpad
- + \ No newline at end of file diff --git a/howtos/redisgraph/getting-started/index.html b/howtos/redisgraph/getting-started/index.html index 2784c88a8f..8915e30213 100644 --- a/howtos/redisgraph/getting-started/index.html +++ b/howtos/redisgraph/getting-started/index.html @@ -4,7 +4,7 @@ Graph database using Redis Stack | The Home of Redis Developers - + @@ -14,7 +14,7 @@

Graph database using Redis Stack

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is a Redis module that enables enterprises to process any kind of connected data much faster than with traditional relational or existing graph databases. RedisGraph implements a unique data storage and processing solution (with sparse-adjacency matrices and GraphBLAS) to deliver the fastest and most efficient way to store, manage, and process connected data in graphs. With RedisGraph, you can process complex transactions 10 - 600 times faster than with traditional graph solutions while using 50 - 60% less memory resources than other graph databases!

Step 1. Create a free Cloud account

Create your free Redis Enterprise Cloud account. Once you click on “Get Started”, you will receive an email with a link to activate your account and complete your signup process.

tip

For a limited time, use TIGER200 to get $200 credits on Redis Enterprise Cloud and try all the advanced capabilities!

🎉 Click here to sign up

Step 2. Create Your database

Choose your preferred cloud vendor. Select the region and then click "Let's start free" to create your free database automatically.

tip

If you want to create a custom database with your preferred name and type of Redis, click the "Create a custom database" option shown in the image.

create database

Step 3. Verify the database details

You will be provided with Public endpoint URL and "Redis Stack" as the type of database with the list of modules that comes by default.

verify database

Step 4. Install RedisInsight

RedisInsight is a visual tool that lets you do both GUI- and CLI-based interactions with your Redis database, and so much more when developing your Redis based application. It is a fully-featured pure Desktop GUI client that provides capabilities to design, develop and optimize your Redis application. It works with any cloud provider as long as you run it on a host with network access to your cloud-based Redis server. It makes it easy to discover cloud databases and configure connection details with a single click. It allows you to automatically add Redis Enterprise Software and Redis Enterprise Cloud databases.

You can install Redis Stack on your local system to get the RedisInsight GUI tool up and running. Ensure that you have the brew package manager installed on your Mac.

 brew tap redis-stack/redis-stack
brew install --cask redis-stack
  ==> Installing Cask redis-stack-redisinsight
==> Moving App 'RedisInsight-preview.app' to '/Applications/RedisInsight-preview.app'
🍺 redis-stack-redisinsight was successfully installed!
==> Installing Cask redis-stack
🍺 redis-stack was successfully installed!

Go to Applications and click "RedisInsight-v2" to bring up the Redis Desktop GUI tool.

Step 5. Add Redis database

access redisinsight

Step 6. Enter Redis Enterprise Cloud details

Add the Redis Enterprise cloud database endpoint, port and password.

access redisinsight

Step 7. Verify the database under RedisInsight dashboard

database details

Step 8. Getting Started with RedisGraph

In the following steps, we will use some basic RedisGraph commands to insert data into a graph and then query the graph. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight. (See part 2 of this tutorial to learn more about using the RedisInsight CLI.)

RedisGraph

Step 9: Insert data into a graph

Insert actors

To interact with RedisGraph you will typically use the GRAPH.QUERY command and execute Cypher queries. Let's start by inserting some actors into the graph:movies graph, which is created automatically by this command:

>> GRAPH.QUERY graph:movies "CREATE (:Actor {name:'Mark Hamill', actor_id:1}), (:Actor {name:'Harrison Ford', actor_id:2}), (:Actor {name:'Carrie Fisher', actor_id:3})"

1) 1) "Labels added: 1"
2) "Nodes created: 3"
3) "Properties set: 6"
4) "Query internal execution time: 0.675400 milliseconds"

This single query creates three actors, along with their names and unique IDs.

Insert a movie

> GRAPH.QUERY graph:movies "CREATE (:Movie {title:'Star Wars: Episode V - The Empire Strikes Back', release_year: 1980 , movie_id:1})"
1) 1) "Labels added: 1"
2) "Nodes created: 1"
3) "Properties set: 3"
4) "Query internal execution time: 0.392300 milliseconds"

This single query creates a movie with a title, the release year, and an ID.

Associate actors and movies

The core of a graph is the relationships between the nodes, allowing the applications to navigate and query them. Let’s create a relationship between the actors and the movies:

> GRAPH.QUERY graph:movies "MATCH (a:Actor),(m:Movie) WHERE a.actor_id = 1 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Luke Skywalker'}]->(m) RETURN r"
1) 1) "r"
2) 1) 1) 1) 1) "id"
2) (integer) 1
2) 1) "type"
2) "Acted_in"
3) 1) "src_node"
2) (integer) 0
4) 1) "dest_node"
2) (integer) 3
5) 1) "properties"
2) 1) 1) "role"
2) "Luke Skywalker"
3) 1) "Properties set: 1"
2) "Relationships created: 1"
3) "Query internal execution time: 0.664800 milliseconds"

This command created a new relation indicating that the actor Mark Hamill acted in Star Wars: Episode V as Luke Skywalker.

Let’s repeat this process for the other actors:

> GRAPH.QUERY graph:movies "MATCH (a:Actor), (m:Movie) WHERE a.actor_id = 2 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Han Solo'}]->(m) RETURN r"
> GRAPH.QUERY graph:movies "MATCH (a:Actor), (m:Movie) WHERE a.actor_id = 3 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Princess Leila'}]->(m) RETURN r"

You can also do all of this in a single query, for example:

> GRAPH.QUERY graph:movies "CREATE (:Actor {name:'Marlo Brando', actor_id:4})-[:Acted_in {role:'Don Vito Corleone'}]->(:Movie {title:'The Godfather', release_year: 1972 , movie_id:2})"

1) 1) "Nodes created: 2"
2) "Properties set: 6"
3) "Relationships created: 1"
4) "Query internal execution time: 0.848500 milliseconds"

Querying the graph

Now that you have data in your graph, you’re ready to ask some questions, such as:

“What are the titles of all the movies?”

> GRAPH.QUERY graph:movies "MATCH (m:Movie) RETURN m.title"

1) 1) "m.title"
2) 1) 1) "Star Wars: Episode V - The Empire Strikes Back"
2) 1) "The Godfather"
3) 1) "Query internal execution time: 0.349400 milliseconds"

“What is the information for the movie with the ID of 1?”

> GRAPH.QUERY graph:movies "MATCH (m:Movie) WHERE m.movie_id = 1 RETURN m"

1) 1) "m"
2) 1) 1) 1) 1) "id"
2) (integer) 3
2) 1) "labels"
2) 1) "Movie"
3) 1) "properties"
2) 1) 1) "title"
2) "Star Wars: Episode V - The Empire Strikes Back"
2) 1) "release_year"
2) (integer) 1980
3) 1) "movie_id"
2) (integer) 1
3) 1) "Query internal execution time: 0.365800 milliseconds"

“Who are the actors in the movie 'Star Wars: Episode V - The Empire Strikes Back' and what roles did they play?”

> GRAPH.QUERY graph:movies "MATCH (a:Actor)-[r:Acted_in]-(m:Movie) WHERE m.movie_id = 1 RETURN a.name,m.title,r.role"
1) 1) "a.name"
2) "m.title"
3) "r.role"
2) 1) 1) "Mark Hamill"
2) "Star Wars: Episode V - The Empire Strikes Back"
3) "Luke Skywalker"
2) 1) "Harrison Ford"
2) "Star Wars: Episode V - The Empire Strikes Back"
3) "Han Solo"
3) 1) "Carrie Fisher"
2) "Star Wars: Episode V - The Empire Strikes Back"
3) "Princess Leila"
3) 1) "Query internal execution time: 0.641200 milliseconds"

Visualizing graph databases using RedisInsight

If you are using RedisInsight, you can visualize the nodes and relationships and navigate them graphically. Click on the RedisGraph menu entry on the left and enter the query:

MATCH (m:Actor) return m

Click on the Execute button, and double-click on the actors to follow the relationships. You should see a graph like this one:

RedisGraph

Resources

Next Steps

  • Learn more about RedisGraph in the Quickstart tutorial.

Redis Launchpad
- + \ No newline at end of file diff --git a/howtos/redisgraph/index.html b/howtos/redisgraph/index.html index 1b8a4ee401..1da5e1f77e 100644 --- a/howtos/redisgraph/index.html +++ b/howtos/redisgraph/index.html @@ -4,7 +4,7 @@ RedisGraph Tutorial | The Home of Redis Developers - + @@ -12,7 +12,7 @@

RedisGraph Tutorial

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).

The following links provide you with the available options to get started with RedisGraph:
Getting Started with RedisGraph
Explore Python code using RedisGraph
Building RedisGraph databases from CSV Inputs
- + \ No newline at end of file diff --git a/howtos/redisgraph/redisgraph-cheatsheet/index.html b/howtos/redisgraph/redisgraph-cheatsheet/index.html index 33c913b26a..a84f301084 100644 --- a/howtos/redisgraph/redisgraph-cheatsheet/index.html +++ b/howtos/redisgraph/redisgraph-cheatsheet/index.html @@ -4,7 +4,7 @@ RedisGRAPH Cheatsheet | The Home of Redis Developers - + @@ -12,7 +12,7 @@

RedisGraph Cheatsheet

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).

Purpose and syntax of the main commands and clauses:

  • Execute a query against a named graph:
    GRAPH.QUERY <graph name> "<query>"
  • Execute a read-only query against a named graph:
    GRAPH.RO_QUERY <graph name> "<query>"
  • Query structure: MATCH describes the relationship between queried entities, using ASCII art to represent the pattern(s) to match against:
    GRAPH.QUERY <graph name> "MATCH <pattern>"
  • The OPTIONAL MATCH clause is a MATCH variant that produces null values for elements that do not match successfully, rather than the all-or-nothing logic for patterns in MATCH clauses:
    GRAPH.QUERY <graph name> "MATCH <pattern> OPTIONAL MATCH <pattern>"
  • The WHERE clause is not mandatory, but if you want to filter results, you can specify your predicates here:
    GRAPH.QUERY <graph name> "MATCH <pattern> WHERE <pattern>"
  • The RETURN clause defines the result set, letting you tailor exactly what the query returns:
    GRAPH.QUERY <graph name> "MATCH <pattern> WHERE <pattern> RETURN <pattern>"
  • ORDER BY specifies that the output should be sorted, and how:
    GRAPH.QUERY <graph name> "MATCH <pattern> WHERE <pattern> RETURN <pattern> ORDER BY <variables> [ASC|DESC]"
  • The optional SKIP clause allows a specified number of records to be omitted from the result set:
    GRAPH.QUERY <graph name> "MATCH <pattern> WHERE <pattern> RETURN <pattern> ORDER BY <variables> [ASC|DESC] SKIP <n>" where "n" is an integer
  • The LIMIT clause is not mandatory and can be used to limit the number of records returned by a RETURN set:
    GRAPH.QUERY <graph name> "MATCH <pattern> WHERE <pattern> RETURN <pattern> ORDER BY <variables> [ASC|DESC] LIMIT <n>" where "n" is an integer
  • The CREATE clause is used to introduce new nodes and relationships:
    GRAPH.QUERY <graph name> "MATCH <pattern> CREATE <nodes>"
  • The DELETE clause is used to remove both nodes and relationships:
    GRAPH.QUERY <graph name> "MATCH <pattern> DELETE <alias>"
  • The SET clause is used to create or update properties on nodes and relationships:
    GRAPH.QUERY <graph name> "MATCH <pattern> SET <property value>"
  • The MERGE clause ensures that a pattern (path) exists in the graph:
    GRAPH.QUERY <graph name> "MERGE <pattern>"
  • The WITH clause allows parts of queries to be independently executed and have their results handled uniquely:
    GRAPH.QUERY <graph name> "MATCH <pattern> WITH <property value> AS <alias>"
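As an illustrative example of how several of these clauses compose into a single query (the graph name, labels, and properties here are hypothetical):

GRAPH.QUERY movies "MATCH (a:Actor)-[:ACTED_IN]->(m:Movie) WHERE m.release_year > 1980 RETURN a.name, m.title ORDER BY m.title ASC LIMIT 10"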
- + \ No newline at end of file diff --git a/howtos/redisgraph/redisgraphmovies/index.html b/howtos/redisgraph/redisgraphmovies/index.html index 919cbbe8db..c26276acc7 100644 --- a/howtos/redisgraph/redisgraphmovies/index.html +++ b/howtos/redisgraph/redisgraphmovies/index.html @@ -4,7 +4,7 @@ Building Movies database app using React, NodeJS and Redis | The Home of Redis Developers - + @@ -13,7 +13,7 @@

Building Movies database app using React, NodeJS and Redis

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

IMDb (Internet Movie Database) is the world's most popular and authoritative source for information on movies, TV shows, and celebrities. This application is an IMDb clone with basic account authentication and movie recommendation functionality. You will learn how to harness the power of RedisGraph and NodeJS to build a simple movie database.

moviedb

Tech Stack

  • Frontend - React
  • Backend - Node.js, Redis, RedisGraph

Step 1. Install the pre-requisites

  • Node - v13.14.0+
  • NPM - v7.6.0+

Step 2. Run Redis Stack Docker container

 docker run -d -p 6379:6379 redis/redis-stack

Ensure that the Docker container is up and running:

 docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
fd5ef30f025a redis/redis-stack "redis-server --load…" 2 hours ago Up 2 hours 0.0.0.0:6379->6379/tcp nervous_buck

Step 3. Run RedisInsight Docker container

 docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Ensure that the Docker container is up and running:

 docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
264db1706dcc redislabs/redisinsight:latest "bash ./docker-entry…" About an hour ago Up About an hour 0.0.0.0:8001->8001/tcp angry_shirley
fd5ef30f025a redis/redis-stack "redis-server --load…" 2 hours ago Up 2 hours 0.0.0.0:6379->6379/tcp nervous_buck

Step 4. Clone the repository

 git clone https://github.com/redis-developer/basic-redisgraph-movie-demo-app-nodejs

Step 5. Setting up environment variables

Copy .env.sample to .env and add the following details:

  REDIS_ENDPOINT_URL = "Redis server URI"
REDIS_PASSWORD = "Password to the server"

Step 6. Install the dependencies

 npm install

Step 7. Run the backend server

 node app.js

Step 8. Run the client

 cd client
yarn install
yarn start

Step 9. Accessing the Movie app

Open http://IP:3000 to access the movie app

movieapp

Step 10. Sign up for a new account

moviedb

Enter the details to create a new account:

movieapp

Step 11. Sign-in to movie app

movieapp

Step 12. Rate the movie

movieapp

Step 13. View the list of rated movies

movieapp

Step 14. View directed movies in RedisInsight

 GRAPH.QUERY "MovieApp" "MATCH (director:Director {tmdbId: \"4945\"})-[:DIRECTED]->(movie:Movie) RETURN DISTINCT movie,director"

movieapp

Step 15. Find movies an actor acted in

Run the query below in RedisGraph to find the movies an actor acted in:

 GRAPH.QUERY "MovieApp" "MATCH (actor:Actor {tmdbId: \"8537\"})-[:ACTED_IN_MOVIE]->(movie:Movie) RETURN DISTINCT movie,actor"

movieapp

Step 16. Store a user in a database

 CREATE (user:User {id: 32,
username: "user", password: "hashed_password", api_key: "525d40da10be8ec75480"})
RETURN user

movieapp

Step 17. Find a user by username

 MATCH (user:User {username: "user"}) RETURN user

movieapp

How it works

The app consumes the data provided by the Express API and presents it through some views to the end user, including:

  • Home page
  • Sign-up and Login pages
  • Movie detail page
  • Actor and Director detail page
  • User detail page

Home page

How it works

The home page shows the genres and a brief listing of movies associated with them.

How the data is stored

Add a new genre:

 create (g:Genre{name:"Adventure"})

Add a movie:

 create (m:Movie {
url: "https://themoviedb.org/movie/862",
id:232,
languages:["English"],
title:"Toy Story",
countries:["USA"],
budget:30000000,
duration:81,
imdbId:"0114709",
imdbRating:8.3,
imdbVotes:591836,
movieId:42,
plot:"...",
poster:"https://image.tmd...",
poster_image:"https://image.tmdb.or...",
released:"1995-11-22",
revenue:373554033,
runtime:$runtime,
tagline:"A cowboy doll is profoundly t...",
tmdbId:"8844",
year:"1995"})

Set genre to a movie:

 MATCH (g:Genre), (m:Movie)
WHERE g.name = "Adventure" AND m.title = "Toy Story"
CREATE (m)-[:IN_GENRE]->(g)

How the data is accessed

Get genres:

 MATCH (genre:Genre) RETURN genre

Get movies by genre:

 MATCH (movie:Movie)-[:IN_GENRE]->(genre)
WHERE toLower(genre.name) = toLower("Film-Noir") OR id(genre) = toInteger("Film-Noir")
RETURN movie

Code example: Get movies with genre

const getByGenre = function (session, genreId) {
  const query = [
    'MATCH (movie:Movie)-[:IN_GENRE]->(genre)',
    'WHERE toLower(genre.name) = toLower($genreId) OR id(genre) = toInteger($genreId)',
    'RETURN movie',
  ].join('\n');

  return session
    .query(query, {
      genreId,
    })
    .then((result) => manyMovies(result));
};

Sign-up and Login pages

moviedb moviedb

To be able to rate movies, a user needs to be logged in: for that, a basic JWT-based authentication system is implemented, with user details stored in RedisGraph for persistence.

How the data is stored

Store user in the database:

 CREATE (user:User {id: 32,
username: "user", password: "hashed_password", api_key: "525d40da10be8ec75480"})
RETURN user

How the data is accessed

Find by user name:

 MATCH (user:User {username: "user"}) RETURN user

Code Example: Find user

const me = function (session, apiKey) {
  return session
    .query('MATCH (user:User {api_key: $api_key}) RETURN user', {
      api_key: apiKey,
    })
    .then((foundedUser) => {
      if (!foundedUser.hasNext()) {
        throw {message: 'invalid authorization key', status: 401};
      }
      while (foundedUser.hasNext()) {
        const record = foundedUser.next();
        return new User(record.get('user'));
      }
    });
};

Movie detail page

How it works

On this page a user can rate the film and view the Actors/directors who participated in the production of the film.

How the data is stored

Associate actor with a movie:

 MATCH (m:Movie) WHERE m.title="Jumanji" CREATE (a:Actor :Person{
bio:"Sample...",
bornIn:"Denver, Colorado, USA",
imdbId:"0000245",
name:"Robin Williams",
poster:"https://image.tmdb.org/t/p/w440_and_...",
tmdbId:"2157",
url:"https://themoviedb.org/person/2157"})-[r:ACTED_IN_MOVIE
{role: "Alan Parrish"}]->(m)

Associate director with a movie:

 MATCH (m:Movie) WHERE m.title="Dead Presidents" CREATE (d:Director :Person{
bio: "From Wikipedia, the free e...",
bornIn: "Detroit, Michigan, USA",
imdbId: "0400436",
name: "Albert Hughes",
tmdbId: "11447",
url: "https://themoviedb.org/person/11447"})-[r:DIRECTED]->(m)

How the data is accessed

Find movie by id with genre, actors and director:

 MATCH (movie:Movie {tmdbId: $movieId})
OPTIONAL MATCH (movie)<-[my_rated:RATED]-(me:User {id: "e1e3991f-fe81-439e-a507-aa0647bc0b88"})
OPTIONAL MATCH (movie)<-[r:ACTED_IN_MOVIE]-(a:Actor)
OPTIONAL MATCH (movie)-[:IN_GENRE]->(genre:Genre)
OPTIONAL MATCH (movie)<-[:DIRECTED]-(d:Director)
WITH DISTINCT movie, my_rated, genre, d, a, r
RETURN DISTINCT movie,
collect(DISTINCT d) AS directors,
collect(DISTINCT a) AS actors,
collect(DISTINCT genre) AS genres

Code Example: Get movie detail

const getById = function (session, movieId, userId) {
  if (!userId) throw {message: 'invalid authorization key', status: 401};
  const query = [
    'MATCH (movie:Movie {tmdbId: $movieId})\n' +
      ' OPTIONAL MATCH (movie)<-[my_rated:RATED]-(me:User {id: $userId})\n' +
      ' OPTIONAL MATCH (movie)<-[r:ACTED_IN_MOVIE]-(a:Actor)\n' +
      ' OPTIONAL MATCH (movie)-[:IN_GENRE]->(genre:Genre)\n' +
      ' OPTIONAL MATCH (movie)<-[:DIRECTED]-(d:Director)\n' +
      ' WITH DISTINCT movie, my_rated, genre, d, a, r\n' +
      ' RETURN DISTINCT movie,\n' +
      ' collect(DISTINCT d) AS directors,\n' +
      ' collect(DISTINCT a) AS actors,\n' +
      ' collect(DISTINCT genre) AS genres',
  ].join(' ');
  return session
    .query(query, {
      movieId: movieId.toString(),
      userId: userId.toString(),
    })
    .then((result) => {
      if (result.hasNext()) {
        return _singleMovieWithDetails(result.next());
      }
      throw {message: 'movie not found', status: 404};
    });
};

Actor and Director detail page

How it works

How the data is accessed

Find movies an actor acted in:

 MATCH (actor:Actor {tmdbId: "8537"})-[:ACTED_IN_MOVIE]->(movie:Movie)
RETURN DISTINCT movie,actor

Find movies directed by:

 MATCH (director:Director {tmdbId: "4945"})-[:DIRECTED]->(movie:Movie)
RETURN DISTINCT movie,director

Get movies directed by

const getByDirector = function (session, personId) {
  const query = [
    'MATCH (director:Director {tmdbId: $personId})-[:DIRECTED]->(movie:Movie)',
    'RETURN DISTINCT movie,director',
  ].join('\n');

  return session
    .query(query, {
      personId,
    })
    .then((result) => manyMovies(result));
};

User detail page

How it works

Shows the profile info and the movies rated by the user.

How the data is stored

Set rating for a movie:

 MATCH (u:User {id: 42}),(m:Movie {tmdbId: 231})
MERGE (u)-[r:RATED]->(m)
SET r.rating = "7"
RETURN m

How the data is accessed

Get movies and user ratings:

 MATCH (:User {id: "d6b31131-f203-4d5e-b1ff-d13ebc06934d"})-[rated:RATED]->(movie:Movie)
RETURN DISTINCT movie, rated.rating as my_rating

Get rated movies for user

const getRatedByUser = function (session, userId) {
  return session
    .query(
      'MATCH (:User {id: $userId})-[rated:RATED]->(movie:Movie) \
       RETURN DISTINCT movie, rated.rating as my_rating',
      {userId},
    )
    .then((result) =>
      result._results.map((r) => new Movie(r.get('movie'), r.get('my_rating'))),
    );
};

Data types:

  • The data is stored in various keys and various relationships.
    • There are 5 types of data
      • User
      • Director
      • Actor
      • Genre
      • Movie

Each type has its own properties

  • Actor: id, bio, born , bornIn, imdbId, name, poster, tmdbId, url
  • Genre: id, name
  • Director: id, born, bornIn, imdbId, name, tmdbId, url
  • User: id, username, password, api_key
  • Movie: id, url, languages, countries, budget, duration, imdbId, imdbRating, imdbVotes, movieId, plot, poster, poster_image, released, revenue, runtime, tagline, tmdbId, year

And there are 4 types of relationship:

  • User-RATED->Movie
  • Director-DIRECTED->Movie
  • Actor-ACTED_IN_MOVIE->Movie
  • Movie-IN_GENRE->Genre

References

- + \ No newline at end of file diff --git a/howtos/redisgraph/using-dotnet/index.html b/howtos/redisgraph/using-dotnet/index.html index b46c46a93b..c8515fc402 100644 --- a/howtos/redisgraph/using-dotnet/index.html +++ b/howtos/redisgraph/using-dotnet/index.html @@ -4,7 +4,7 @@ How to query Graph data in Redis using .NET | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to query Graph data in Redis using .NET

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).

RedisGraph enables you to store and query graph data in Redis using the Cypher Query Language (https://opencypher.org/). In this article, we will discuss the usage of RedisGraph with .NET.

NRedisGraph

We'll use the NRedisGraph package in this tutorial. To install the package in your project, use dotnet add package NRedisGraph.

Running RedisGraph

The easiest way to get up and running with RedisGraph locally is to use the RedisGraph docker image:

docker run -p 6379:6379 redis/redis-stack-server:latest

The above command will start an instance of Redis locally with the RedisGraph module loaded, and you will be able to connect to it on localhost:6379

Connecting

NRedisGraph makes use of the StackExchange.Redis project, which is installed along with NRedisGraph. To create the RedisGraph object, you'll first create a ConnectionMultiplexer, pull a reference to an IDatabase object from it, and then initialize the RedisGraph with the IDatabase object:

var muxer = ConnectionMultiplexer.Connect("localhost");
var db = muxer.GetDatabase();
var graph = new RedisGraph(db);

Sending a Query

Querying in RedisGraph covers a wide array of operations, but fundamentally, when executing queries with NRedisGraph, all you need to do is call graph.Query or graph.QueryAsync, passing in the name of the graph you want to query and the query you want to run. We'll be using the graph pets for the remainder of this tutorial; pets is the name of the key the graph will be stored at, hence any call to graph.Query or graph.QueryAsync will first pass in pets to indicate the graph to work with.

Creating a Node

To create a node in RedisGraph, you'll use the CREATE operation. Let's start by making two humans, Alice and Bob:

var createBobResult = await graph.QueryAsync("pets", "CREATE(:human{name:'Bob',age:32})");
await graph.QueryAsync("pets", "CREATE(:human{name:'Alice',age:30})");

Running a Query against RedisGraph produces a ResultSet. This result contains some metadata about the query in the Statistics section, as well as any results generated by the query. In the above case, the only thing returned is the statistics for the query, which you can print out directly from the results object:

Console.WriteLine($"Nodes Created:{createBobResult.Statistics.NodesCreated}");
Console.WriteLine($"Properties Set:{createBobResult.Statistics.PropertiesSet}");
Console.WriteLine($"Labels Created:{createBobResult.Statistics.LabelsAdded}");
Console.WriteLine($"Operation took:{createBobResult.Statistics.QueryInternalExecutionTime}");

You can create nodes with other labels by simply executing another CREATE statement. For example, if we wanted to create a 'pet' named 'Honey' who is a 5-year-old greyhound, we would run:

await graph.QueryAsync("pets", "CREATE(:pet{name:'Honey',age:5,species:'canine',breed:'Greyhound'})");

Creating Relationships

As with nodes, you can use the Query/QueryAsync commands to create relationships between nodes in RedisGraph. For example, to establish the owner relationship between Bob and the greyhound Honey, you would use the following:

await graph.QueryAsync("pets",
"MATCH(a:human),(p:pet) WHERE(a.name='Bob' and p.name='Honey') CREATE (a)-[:OWNS]->(p)");

You can establish other relationships between nodes as well. Say, for example, both Bob and Alice walk Honey; you could add the connections:

await graph.QueryAsync("pets",
"MATCH(a:human),(p:pet) WHERE(a.name='Alice' and p.name='Honey') CREATE (a)-[:WALKS]->(p)");
await graph.QueryAsync("pets",
"MATCH(a:human),(p:pet) WHERE(a.name='Bob' and p.name='Honey') CREATE (a)-[:WALKS]->(p)");

Querying Relationships

Now that we've created a few Nodes and Relationships between nodes, we can query things in the Graph, again using Query and QueryAsync. So, for example, if we wanted to find all of Honey's owners, we would issue the following query:

var matches = await graph.QueryAsync("pets", "MATCH(a:human),(p:pet) where (a)-[:OWNS]->(p) and p.name='Honey' return a");

We can then iterate over the resultant matches, which is the same ResultSet class we were using before, but it will have actual results we can access this time.

foreach (var match in matches)
{
Console.WriteLine(((Node) match.Values.First()).PropertyMap["name"].Value);
}

We can also find all of Honey's walkers by finding all the humans who have a WALKS relationship with Honey:

matches = await graph.QueryAsync("pets", "MATCH(a:human),(p:pet) where (a)-[:WALKS]->(p) and p.name='Honey' return a");

Then if we wanted to find all of Bob's dogs, we would query the graph and find all the canines who have an OWNS relationship with a human named Bob:

matches = await graph.QueryAsync("pets", "MATCH(a:human),(p:pet) where (a)-[:OWNS]->(p) and p.species='canine' and a.name='Bob' return p");

Resources

  • Code for this demo is available in GitHub
  • To learn more about RedisGraph, check out the docs site
  • To learn more about The Cypher Query Language, check out opencypher.org
- + \ No newline at end of file diff --git a/howtos/redisgraph/using-go/index.html b/howtos/redisgraph/using-go/index.html index 997dab9378..a95200f8fc 100644 --- a/howtos/redisgraph/using-go/index.html +++ b/howtos/redisgraph/using-go/index.html @@ -4,7 +4,7 @@ How to query Graph data in Redis using Go | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to query Graph data in Redis using Go

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the fastest graph database: it processes complex graph operations in real time, 10x – 600x faster than any other graph database. It shows how your data is connected through multiple visualization integrations, including RedisInsight, Linkurious, and Graphileon, lets you query graphs using the industry-standard Cypher query language, and makes it easy to use graph capabilities from application code.

RedisGraph Go Client

redisgraph-go is a Golang client for the RedisGraph module. It relies on redigo for Redis connection management and provides support for RedisGraph's QUERY, EXPLAIN, and DELETE commands.

Follow the steps below to get started with RedisGraph with Go:

Step 1. Run Redis Stack Docker container

 docker run -p 6379:6379 --name redis-stack redis/redis-stack

Step 2. Verify if RedisGraph module is loaded

 info modules
# Modules
module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

Step 3. Install the RedisGraph Go client

 go get github.com/redislabs/redisgraph-go

Step 4. Clone the repository

 git clone https://github.com/RedisGraph/redisgraph-go

Step 5. Running the Test suite

 go test
found packages redisgraph (client_test.go) and main (main.go) in /Users/ajeetraina/projects/redisgraph-go

Step 6. Running the Go Program

 go run main.go
+----------+-------+--------+
| p.name | p.age | c.name |
+----------+-------+--------+
| John Doe | 33 | Japan |
+----------+-------+--------+

Cached execution 0.000000
Query internal execution time 3.031700
Visited countries by person:

Name: John Doe

Age: 33
Pathes of persons vi

Step 7. Monitor the Graph query

 redis-cli
127.0.0.1:6379> monitor
OK
1633495122.588292 [0 172.17.0.1:58538] "GRAPH.DELETE" "social"
1633495122.589641 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "CREATE (UPoQSvSnBD:person{gender:\"male\",status:\"single\",name:\"John Doe\",age:33}),(ZNxbsnHGoO:country{name:\"Japan\"}),(UPoQSvSnBD)-[:visited]->(ZNxbsnHGoO)" "--compact"
1633495122.591407 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "MATCH (p:person)-[v:visited]->(c:country)\n RETURN p.name, p.age, c.name" "--compact"
1633495122.593040 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "MATCH p = (:person)-[:visited]->(:country) RETURN p" "--compact"
1633495122.594405 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "CALL db.labels()" "--compact"
1633495122.595552 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "CALL db.propertyKeys()" "--compact"
1633495122.596942 [0 172.17.0.1:58538] "GRAPH.QUERY" "social" "CALL db.relationshipTypes()" "--compact"

Step 8. Install RedisInsight

Run the RedisInsight container. The easiest way is to run the following command:

 docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Step 9. Accessing RedisInsight

Next, point your browser to http://localhost:8001.

Step 10. Run the Graph Query

You can run a query to return the nodes in the graph; add a LIMIT clause if you want to cap the number of records returned:

GRAPH.QUERY "social" "MATCH (n) RETURN n"

My Image

References

  • Learn more about RedisGraph in the Quickstart tutorial.

Redis Launchpad
- + \ No newline at end of file diff --git a/howtos/redisgraph/using-javascript/index.html b/howtos/redisgraph/using-javascript/index.html index 8bea99fdd9..74d36ddbcc 100644 --- a/howtos/redisgraph/using-javascript/index.html +++ b/howtos/redisgraph/using-javascript/index.html @@ -4,7 +4,7 @@ How to query Graph data in Redis using JavaScript | The Home of Redis Developers - + @@ -12,7 +12,7 @@

How to query Graph data in Redis using JavaScript

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the fastest graph database: it processes complex graph operations in real time, 10x – 600x faster than any other graph database. It shows how your data is connected through multiple visualization integrations, including RedisInsight, Linkurious, and Graphileon, lets you query graphs using the industry-standard Cypher query language, and makes it easy to use graph capabilities from application code.

RedisGraph JavaScript Client

Follow the steps below to get started with RedisGraph with JavaScript:

Step 1. Run Redis Stack Docker container

 docker run -p 6379:6379 --name redis-stack redis/redis-stack

Step 2. Verify if RedisGraph module is loaded

 info modules
# Modules
module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

Step 3. Clone the repository

 git clone https://github.com/RedisGraph/redisgraph.js

Step 4. Install the packages locally

 npm install redisgraph.js

Step 5. Write a JavaScript code

const RedisGraph = require('redisgraph.js').Graph;

let graph = new RedisGraph('social');

(async () => {
  await graph.query("CREATE (:person{name:'roi',age:32})");
  await graph.query("CREATE (:person{name:'amit',age:30})");
  await graph.query(
    "MATCH (a:person), (b:person) WHERE (a.name = 'roi' AND b.name='amit') CREATE (a)-[:knows]->(b)",
  );

  // Match query.
  let res = await graph.query(
    'MATCH (a:person)-[:knows]->(:person) RETURN a.name',
  );
  while (res.hasNext()) {
    let record = res.next();
    console.log(record.get('a.name'));
  }
  console.log(res.getStatistics().queryExecutionTime());

  // Match with parameters.
  let param = { age: 30 };
  res = await graph.query('MATCH (a {age: $age}) return a.name', param);
  while (res.hasNext()) {
    let record = res.next();
    console.log(record.get('a.name'));
  }

  // Named paths matching.
  res = await graph.query('MATCH p = (a:person)-[:knows]->(:person) RETURN p');
  while (res.hasNext()) {
    let record = res.next();
    // See path.js for more path API.
    console.log(record.get('p').nodeCount);
  }
  graph.deleteGraph();
  graph.close();
})();

Save the above file as "app.js".

Step 6. Execute the Script

 node app.js
 roi
0.1789
amit
2

Step 7. Monitor the Graph query

 1632898652.415702 [0 172.17.0.1:64144] "info"
1632898652.418225 [0 172.17.0.1:64144] "graph.query" "social" "CREATE (:person{name:'roi',age:32})" "--compact"
1632898652.420399 [0 172.17.0.1:64144] "graph.query" "social" "CREATE (:person{name:'amit',age:30})" "--compact"
1632898652.421857 [0 172.17.0.1:64144] "graph.query" "social" "MATCH (a:person), (b:person) WHERE (a.name = 'roi' AND b.name='amit') CREATE (a)-[:knows]->(b)" "--compact"
1632898652.424911 [0 172.17.0.1:64144] "graph.query" "social" "MATCH (a:person)-[:knows]->(:person) RETURN a.name" "--compact"
1632898652.429658 [0 172.17.0.1:64144] "graph.query" "social" "CYPHER age=30 MATCH (a {age: $age}) return a.name" "--compact"
1632898652.431221 [0 172.17.0.1:64144] "graph.query" "social" "MATCH p = (a:person)-[:knows]->(:person) RETURN p" "--compact"
1632898652.433146 [0 172.17.0.1:64144] "graph.query" "social" "CALL db.labels()" "--compact"
1632898652.434781 [0 172.17.0.1:64144] "graph.query" "social" "CALL db.propertyKeys()" "--compact"
1632898652.436574 [0 172.17.0.1:64144] "graph.query" "social" "CALL db.relationshipTypes()" "--compact"
1632898652.438559 [0 172.17.0.1:64144] "graph.delete" "social"

Step 8. Install RedisInsight

Run the RedisInsight container. The easiest way is to run the following command:

 docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Step 9. Accessing RedisInsight

Next, point your browser to http://localhost:8001.

Step 10. Run the Graph Query

You can display the number of records returned by a query:
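
For example, an aggregation like count(n) (shown here as an illustrative query) returns the total number of nodes in the graph:

GRAPH.QUERY "social" "MATCH (n) RETURN count(n)"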

My Image

References

  • Learn more about RedisGraph in the Quickstart tutorial.

Redis Launchpad

How to query Graph data in Redis using Python

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the fastest graph database that processes complex graph operations in real time, 10x – 600x faster than any other graph database. Show how your data is connected through multiple visualization integrations including RedisInsight, Linkurious, and Graphileon. Query graphs using the industry-standard Cypher query language and easily use graph capabilities from application code.

My Image

RedisGraph Python Client

redisgraph-py is a package that allows querying graph data in a Redis database that is extended with the RedisGraph module. The package extends redis-py's interface with RedisGraph's API.

Follow the steps below to get started with RedisGraph with Python:

Step 1. Run Redis Stack Docker container

 docker run -p 6379:6379 --name redis-stack redis/redis-stack

Step 2. Verify if RedisGraph module is loaded

 info modules
# Modules
module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

Step 3. Loading the Python Module

 pip install redisgraph

Step 4. Write a python code

 import redis
from redisgraph import Node, Edge, Graph, Path

r = redis.Redis(host='localhost', port=6379)

redis_graph = Graph('social', r)

john = Node(label='person', properties={'name': 'John Doe', 'age': 33, 'gender': 'male', 'status': 'single'})
redis_graph.add_node(john)

japan = Node(label='country', properties={'name': 'Japan'})
redis_graph.add_node(japan)

edge = Edge(john, 'visited', japan, properties={'purpose': 'pleasure'})
redis_graph.add_edge(edge)

redis_graph.commit()

query = """MATCH (p:person)-[v:visited {purpose:"pleasure"}]->(c:country)
           RETURN p.name, p.age, v.purpose, c.name"""

result = redis_graph.query(query)

# Print resultset
result.pretty_print()

# Use parameters
params = {'purpose': "pleasure"}
query = """MATCH (p:person)-[v:visited {purpose:$purpose}]->(c:country)
           RETURN p.name, p.age, v.purpose, c.name"""

result = redis_graph.query(query, params)

# Print resultset
result.pretty_print()

# Use query timeout to raise an exception if the query takes over 10 milliseconds
result = redis_graph.query(query, params, timeout=10)

# Iterate through resultset
for record in result.result_set:
    person_name = record[0]
    person_age = record[1]
    visit_purpose = record[2]
    country_name = record[3]

query = """MATCH p = (:person)-[:visited {purpose:"pleasure"}]->(:country) RETURN p"""

result = redis_graph.query(query)

# Iterate through resultset
for record in result.result_set:
    path = record[0]
    print(path)

# All done, remove graph.
redis_graph.delete()

Step 5. Execute the Python Script

  python3 test.py
+-----------+----------+--------------+-----------+
| b'p.name' | b'p.age' | b'v.purpose' | b'c.name' |
+-----------+----------+--------------+-----------+
| John Doe | 33 | pleasure | Japan |
+-----------+----------+--------------+-----------+

Cached execution 0.0
internal execution time 3.3023
+-----------+----------+--------------+-----------+
| b'p.name' | b'p.age' | b'v.purpose' | b'c.name' |
+-----------+----------+--------------+-----------+
| John Doe | 33 | pleasure | Japan |
+-----------+----------+--------------+-----------+

Cached execution 0.0
internal execution time 0.2475
<(0)-[0]->(1)>

Step 6. Monitor the Graph query

 127.0.0.1:6379> monitor
OK
1632661901.024018 [0 172.17.0.1:61908] "GRAPH.QUERY" "social" "CREATE (youkjweasb:person{age:33,gender:\"male\",name:\"John Doe\",status:\"single\"}),(jilbktlmgw:country{name:\"Japan\"}),(youkjweasb:person{age:33,gender:\"male\",name:\"John Doe\",status:\"single\"})-[:visited{purpose:\"pleasure\"}]->(jilbktlmgw:country{name:\"Japan\"})" "--compact"
1632661901.025810 [0 172.17.0.1:61908] "GRAPH.QUERY" "social" "MATCH (p:person)-[v:visited {purpose:\"pleasure\"}]->(c:country)\n\t\t RETURN p.name, p.age, v.purpose, c.name" "--compact"
1632661901.027485 [0 172.17.0.1:61908] "GRAPH.QUERY" "social" "CYPHER purpose=\"pleasure\" MATCH (p:person)-[v:visited {purpose:$purpose}]->(c:country)\n\t\t RETURN p.name, p.age, v.purpose, c.name" "--compact"
1632661901.029539 [0 172.17.0.1:61908] "GRAPH.QUERY" "social" "CYPHER purpose=\"pleasure\" MATCH (p:person)-[v:visited {purpose:$purpose}]->(c:country)\n\t\t RETURN p.name, p.age, v.purpose, c.name" "--compact" "timeout" "10"
1632661901.030965 [0 172.17.0.1:61908] "GRAPH.QUERY" "social" "MATCH p = (:person)-[:visited {purpose:\"pleasure\"}]->(:country) RETURN p" "--compact"
1632661901.032250 [0 172.17.0.1:61908] "GRAPH.RO_QUERY" "social" "CALL db.labels()" "--compact"
1632661901.033323 [0 172.17.0.1:61908] "GRAPH.RO_QUERY" "social" "CALL db.propertyKeys()" "--compact"
1632661901.034589 [0 172.17.0.1:61908] "GRAPH.RO_QUERY" "social" "CALL db.relationshipTypes()" "--compact"
1632661901.035625 [0 172.17.0.1:61908] "GRAPH.DELETE" "social"

Let us comment out the last line (redis_graph.delete()) and try to query the graph data over RedisInsight.

Step 7. Install RedisInsight

Run the RedisInsight container. The easiest way is to run the following command:

 docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Step 8. Accessing RedisInsight

Next, point your browser to http://localhost:8001.

Step 9. Run the Graph Query

You can use the LIMIT clause to limit the number of records returned by a query:

MATCH (n) RETURN n LIMIT 1

My Image

Step 10. Run the Graph Query with no LIMIT

MATCH (n) RETURN n

My Image

References

  • Learn more about RedisGraph in the Quickstart tutorial.

Redis Launchpad

How to visualize Graph data using RedisInsight

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

If you’re a Redis user who prefers to use a Graphical User Interface (GUI) for graph queries, then RedisInsight is the right tool for you. It’s a 100% free desktop Redis GUI that provides easy-to-use browser tools to query, visualize and interactively manipulate graphs. You can add new graphs, run queries and explore the results over the GUI tool.

RedisInsight supports RedisGraph and allows you to:

  • Build and execute queries
  • Navigate your graphs
  • Browse, analyze, and export results
  • Zoom using keyboard shortcuts
  • Reset the view and center the entire graph with a single button
  • Zoom via the mouse wheel (double-click to zoom in, double right-click to zoom out)
  • Copy commands with a button click
  • Persist node display choices between queries

As a benefit, you get faster turnarounds when building your application using Redis and RedisGraph.

Follow the steps below to see how your data is connected via the RedisInsight Browser tool.

Step 1. Create Redis database

Follow this link to create a Redis database using Redis Enterprise Cloud with the RedisGraph module enabled.

alt_text

Step 2: Download RedisInsight

To install RedisInsight on your local system, you need to first download the software from the Redis website.

Click this link to access a form that allows you to select the operating system of your choice.

My Image

Run the installer. After the web server starts, open http://YOUR_HOST_IP:8001 and add a Redis database connection.

Select "Connect to a Redis database" My Image

Enter the requested details, including Name, Host (endpoint), Port, and Password. Then click “ADD REDIS DATABASE”.

Step 3: Click “RedisGraph” and then “Add Graph”

Select RedisGraph from the menu.

alt_text

Step 4. Create a new Graph called “Friends”

alt_text

Let us add individuals to the graph. CREATE is used to introduce new nodes and relationships. Run the below Cypher query on the RedisInsight GUI to add a label called Person and a property called “name”.

CREATE (:Person{name:"Tom" }),  (:Person{name:"Alex" }), (:Person{name:"Susan" }), (:Person{name:"Bill" }), (:Person{name:"Jane" })

alt_text

As we can see, “1” label was added, and it refers to the Person label; the label is the same for every node and hence created only once. Overall, 5 nodes were created, and five “name” properties were added (one per node).

Step 5. View all the individuals (nodes)

MATCH describes the relationship between queried entities, using ASCII art to represent the pattern(s) to match against. Nodes are represented by parentheses (), and relationships are represented by brackets [].

As shown below, we have added a lowercase “p” in front of our label; it is a variable we can refer back to. The query returns all the nodes with the label “Person”.

MATCH (p:Person) RETURN p

alt_text

You can select "Graph View" on the right menu to display the graphical representation as shown below:

alt_text

Step 6. Viewing just one individual (node)

MATCH (p:Person {name:"Tom"}) RETURN p

alt_text

Step 7. Visualize the relationship between the individuals

Run the below query to build a relationship between two nodes and see how the relationship flows from one node (“Tom”) to another (“Alex”).

MATCH (p1:Person {name: "Tom" }), (p2:Person {name: "Alex" }) CREATE (p1)-[:Knows]->(p2)

The symbol “>” (greater than) shows which way the relationship flows.

alt_text

You can view the relationship in the form of a graph as shown below:

alt_text

Step 8. Create and visualize multiple relationships

Run the below query to create and visualize the relationships between multiple individuals:

MATCH (p1:Person {name: "Tom" }), (p2:Person {name: "Susan" }), (p3:Person {name: "Bill" }) CREATE (p1)-[:Knows]->(p2), (p1)-[:Knows]->(p3)

alt_text

Step 9. Create and visualize the relationship between two individuals (Susan and Bill)

Let us look at how to generate a graph showcasing the relationship between two individuals, Susan and Bill:

MATCH (p1:Person {name: "Susan"}), (p2:Person {name: "Bill"}) CREATE (p1)-[:Knows]->(p2)

alt_text

Step 10. Create and visualize the relationship between two individuals (Bill and Jane)

MATCH (p1:Person {name: "Bill"}), (p2:Person {name: "Jane"}) CREATE (p1)-[:Knows]->(p2)

alt_text

alt_text

Step 11. Building a social network

This can be achieved with a “friend of friends” kind of relationship. Say Tom wants to network with Jane: he has two contacts who know Jane, one is Susan and the other is Bill.

alt_text

MATCH p = (p1:Person {name: "Tom" })-[:Knows*1..3]-(p2:Person {name: "Jane"}) RETURN p

In this query, we assign the variable “p” to a graph path. We search for “Tom” as p1 and “Jane” as p2, and we say we are interested in Knows links with 1 to 3 degrees of separation.

alt_text

Step 12. Cleaning up the Graph

alt_text
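
If you want to start over with an empty graph, one way to clean it up is a query like the following (in RedisGraph, deleting a node also removes its incident relationships):

MATCH (n) DELETE n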

Importing the Bulk Graph data

Let us try to insert bulk data using Python and then explore it in the form of nodes and relationships.

Step 13. Cloning the repository

$ git clone https://github.com/redis-developer/redis-datasets
cd redis-datasets/redisgraph/datasets/iceandfire

Step 14. Execute the script

$ python3 bulk_insert.py GOT_DEMO -n data/character.csv -n data/house.csv -n data/book.csv -n data/writer.csv -r data/wrote.csv -r data/belongs.csv -h 192.168.1.9 -p 6379



2124 nodes created with label 'b'character''
438 nodes created with label 'b'house''
12 nodes created with label 'b'book''
3 nodes created with label 'b'writer''
14 relations created for type 'b'wrote''
2208 relations created for type 'b'belongs''
Construction of graph 'GOT_DEMO' complete: 2577 nodes created, 2222 relations created in 0.169954 seconds


Step 15. Run the Cypher query

GRAPH.QUERY GOT_DEMO "MATCH (w:writer)-[wrote]->(b:book) return w,b"

alt_text

Additional Resources

Redis Launchpad

How to query Graph data in Redis using Ruby

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the first queryable Property Graph database to use sparse matrices to represent the adjacency matrix in graphs and linear algebra to query the graph. A few of the notable features of RedisGraph include:

  • Based on the Property Graph Model
  • Nodes (vertices) and Relationships (edges) that may have attributes
  • Nodes that can be labeled
  • Relationships have a relationship type
  • Graphs represented as sparse adjacency matrices
  • Cypher as query language
  • Cypher queries translated into linear algebra expressions

RedisGraph is based on a unique approach and architecture that translates Cypher queries to matrix operations executed over a GraphBLAS engine. This new design allows use cases like social graph operations, fraud detection, and real-time recommendations to be executed 10x – 600x faster than any other graph database.

RedisGraph Ruby Client

redisgraph-rb is a Ruby gem client for the RedisGraph module. It relies on redis-rb for Redis connection management and provides support for graph QUERY, EXPLAIN, and DELETE commands.

Follow the steps below to get started with RedisGraph with Ruby:

Step 1. Run Redis Stack Docker container

 docker run -p 6379:6379 --name redis-stack redis/redis-stack

Step 2. Verify if RedisGraph module is loaded

 info modules
# Modules
module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

Step 3. Loading the RedisGraph Ruby Module

 gem install redisgraph
Fetching redisgraph-2.0.3.gem
Successfully installed redisgraph-2.0.3
1 gem installed

Step 4. Install the prerequisites

To ensure the prerequisites are installed, run the following command:

 bundle install

Step 5. Write a Ruby code

Copy the sample code below and save it in a file named "test.rb":

 require 'redisgraph'

graphname = "sample"

r = RedisGraph.new(graphname)

cmd = """CREATE (:person {name: 'Jim', age: 29})-[:works]->(:employer {name: 'Dunder Mifflin'})"""
response = r.query(cmd)
response.stats

cmd = """MATCH ()-[:works]->(e:employer) RETURN e"""

response = r.query(cmd)

response.print_resultset

Step 6. Execute the Ruby code

  ruby test.rb

Step 7. Monitor the Graph query

 redis-cli
127.0.0.1:6379> monitor
OK
1632716792.038955 [0 172.17.0.1:57804] "info"
1632716792.041201 [0 172.17.0.1:57804] "GRAPH.QUERY" "sample" "CREATE (:person {name: 'Jim', age: 29})-[:works]->(:employer {name: 'Dunder Mifflin'})" "--compact"
1632716792.042751 [0 172.17.0.1:57804] "GRAPH.QUERY" "sample" "MATCH ()-[:works]->(e:employer) RETURN e" "--compact"
1632716792.044241 [0 172.17.0.1:57804] "GRAPH.QUERY" "sample" "CALL db.propertyKeys()"
1632716812.060458 [0 172.17.0.1:57962] "COMMAND"
1632716813.148710 [0 172.17.0.1:57962] "GRAPH.QUERY" "sample" "CREATE (:person {name: 'Jim', age: 29})-[:works]->(:employer {name: 'Dunder Mifflin'})" "--compact"

References

  • Learn more about RedisGraph in the Quickstart tutorial.

Redis Launchpad

How to query Graph data in Redis using Rust

End-of-Life Notice

Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

End of support is scheduled for January 31, 2025.

Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).


Profile picture for Ajeet Raina
Author:
Ajeet Raina, Former Developer Growth Manager at Redis

RedisGraph is the first queryable Property Graph database to use sparse matrices to represent the adjacency matrix in graphs and linear algebra to query the graph. It is based on a unique approach and architecture that translates Cypher queries to matrix operations executed over a GraphBLAS engine. This design allows use cases like social graph operations, fraud detection, and real-time recommendations to be executed 10x – 600x faster than any other graph database, making it the fastest graph database for processing complex graph operations in real time. It primarily shows how your data is connected through multiple visualization integrations, including RedisInsight, Linkurious, and Graphileon.

RedisGraph is a graph database developed from scratch on top of Redis, using the Redis Modules API to extend Redis with new commands and capabilities. Its main features include simple, fast indexing and querying of data stored in RAM using memory-efficient custom data structures. RedisGraph is a directed graph where both nodes and relationships are typed: nodes with labels and edges with types. Nodes and edges can, and often do, contain properties, like columns in a SQL database or keys in a document store. The RedisGraph 2.0 benchmark reveals significant improvements on parallel workloads (multiple clients), with latency improvements of up to 6x and throughput improvements of up to 5x when performing graph traversals.

Below are the primary use cases of RedisGraph:

  • Recommendation: It allows you to rapidly find connections between your customers and the experiences they want by examining the relationships between them.
  • Graph-aided search: It allows you to search for single or multiple words or phrases and execute full-text and linguistic queries and implementation in real time over your graph structure.
  • Identity and access management: It allows you to define complex resources access permissions as a graph and enable rapid real-time verification of these permissions with a single query.

RedisGraph Rust Client

The Rust programming language is blazingly fast and memory-efficient: with no runtime or garbage collector, it can power performance-critical services, run on embedded devices, and easily integrate with other languages. It is an open-source project developed originally at Mozilla Research. The Rust standard library is the foundation of portable Rust software, a set of minimal and battle-tested shared abstractions for the broader Rust ecosystem.

redisgraph-rs is an idiomatic Rust client for RedisGraph, the graph database by Redis. This crate parses responses from RedisGraph and converts them into ordinary Rust values. It exposes a very flexible API that allows you to retrieve a single value, a single record, or multiple records using only one function: Graph::query.

Follow the steps below to get started with RedisGraph with Rust:

Step 1. Run Redis Stack Docker container

 docker run -p 6379:6379 --name redis-stack redis/redis-stack

Step 2. Verify if RedisGraph module is loaded

 info modules
# Modules
module:name=graph,ver=20405,api=1,filters=0,usedby=[],using=[],options=[]

Step 3. Install Rust

 brew install rust

Step 4. Clone the repository

  git clone https://github.com/malte-v/redisgraph-rs

Step 5. Write a rust program

Copy the below content and save it as "main.rs" under the src directory.

 use redis::Client;
use redisgraph::{Graph, RedisGraphResult};

fn main() -> RedisGraphResult<()> {
    let client = Client::open("redis://127.0.0.1:6379")?;
    let connection = client.get_connection()?;

    let mut graph = Graph::open(connection, "MotoGP".to_string())?;

    // Create six nodes (three riders, three teams) and three relationships between them.
    graph.mutate("CREATE (:Rider {name: 'Valentino Rossi', birth_year: 1979})-[:rides]->(:Team {name: 'Yamaha'}), \
        (:Rider {name:'Dani Pedrosa', birth_year: 1985, height: 1.58})-[:rides]->(:Team {name: 'Honda'}), \
        (:Rider {name:'Andrea Dovizioso', birth_year: 1986, height: 1.67})-[:rides]->(:Team {name: 'Ducati'})")?;

    // Get the names and birth years of all riders in team Yamaha.
    let results: Vec<(String, u32)> = graph.query("MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = 'Yamaha' RETURN r.name, r.birth_year")?;
    // Since we know just one rider in our graph rides for team Yamaha,
    // we can also write this and only get the first record:
    let (name, birth_year): (String, u32) = graph.query("MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = 'Yamaha' RETURN r.name, r.birth_year")?;
    // Let's now get all the data about the riders we have.
    // Be aware that we only know the height of some riders, and therefore we use an `Option`:
    let results: Vec<(String, u32, Option<f32>)> = graph.query("MATCH (r:Rider) RETURN r.name, r.birth_year, r.height")?;

    // That was just a demo; we don't need this graph anymore. Let's delete it from the database:
    // graph.delete()?;

    Ok(())
}

Step 6. Run the current local package

 cargo run

Step 7. Monitor the Graph query

 1633515550.109594 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "CREATE (dummy:__DUMMY_LABEL__)" "--compact"
1633515550.111727 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "MATCH (dummy:__DUMMY_LABEL__) DELETE dummy" "--compact"
1633515550.114948 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "CREATE (:Rider {name: 'Valentino Rossi', birth_year: 1979})-[:rides]->(:Team {name: 'Yamaha'}), (:Rider {name:'Dani Pedrosa', birth_year: 1985, height: 1.58})-[:rides]->(:Team {name: 'Honda'}), (:Rider {name:'Andrea Dovizioso', birth_year: 1986, height: 1.67})-[:rides]->(:Team {name: 'Ducati'})" "--compact"
1633515550.118380 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = 'Yamaha' RETURN r.name, r.birth_year" "--compact"
1633515550.120766 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = 'Yamaha' RETURN r.name, r.birth_year" "--compact"
1633515550.122505 [0 172.17.0.1:55114] "GRAPH.QUERY" "MotoGP" "MATCH (r:Rider) RETURN r.name, r.birth_year, r.height" "--compact"
1633515550.124045 [0 172.17.0.1:55114] "GRAPH.DELETE" "MotoGP"

Step 8. Install RedisInsight

For this demo, we will be using the RedisInsight Docker container as shown below:

 docker run -d -v redisinsight:/db -p 8001:8001 redislabs/redisinsight:latest

Step 9. Accessing RedisInsight

Next, point your browser to http://localhost:8001.

Step 10. Run the Graph Query

You can use the LIMIT clause to limit the number of records returned by a query:

 GRAPH.QUERY "MotoGP" "MATCH (r:Rider) RETURN r.name, r.birth_year, r.height"
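
For example, an illustrative LIMIT value of 2 would return only the first two riders:

 GRAPH.QUERY "MotoGP" "MATCH (r:Rider) RETURN r.name, r.birth_year, r.height LIMIT 2"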

My Image

References

  • Learn more about RedisGraph in the Quickstart tutorial.

Redis Launchpad

How to Use SSL/TLS With Redis Enterprise

Header

In this article, you will see how to secure your Redis databases using SSL (Secure Sockets Layer). In a production environment, it is always recommended to use SSL to protect the data that moves between various computers (client applications and Redis servers). Transport Layer Security (TLS) guarantees that only allowed applications/computers are connected to the database, and also that data is not viewed or altered by a man-in-the-middle process.

You can secure the connections between your client applications and Redis cluster using:

  • One-Way SSL: the client (your application) gets the certificate from the server (Redis cluster), validates it, and then all communications are encrypted.
  • Two-Way SSL (aka mutual SSL): both the client and the server authenticate each other and validate that both ends are trusted.

In this article, we will focus on Two-Way SSL, using Redis Enterprise.

Prerequisites

  • A Redis Enterprise 6.0.x database (my database is protected by the password secretdb01 and listening on port 12000)
  • redis-cli to run basic commands
  • Python, Node.js, and Java installed if you want to test the various languages used below.

Simple Test

Step 1. Run a Redis server

You can either run the Redis server in a Docker container or directly on your machine. Use the following commands to set up a Redis server locally on macOS:

 brew tap redis-stack/redis-stack
brew install --cask redis-stack
INFO

Redis Stack unifies and simplifies the developer experience of the leading Redis modules and the capabilities they provide. Redis Stack supports the following in addition to Redis: JSON, Search, Time Series, Triggers and Functions, and Probabilistic data structures.

Let's make sure that the database is available:

redis-cli -p 12000 -a secretdb01 INFO SERVER

This should print the Server information.

Step 2. Get the Certificate from Redis Cluster

Assuming that you have access to the Redis Enterprise cluster, you need to access the nodes to retrieve the certificate (which is a self-generated one by default).

The cluster certificate is located at: /etc/opt/redislabs/proxy_cert.pem.

Next, copy the cluster certificate to each client machine; note that once this is done, you can use this certificate to connect using "One-Way SSL", but that is not the purpose of this article.

In this tutorial, we will be using Docker to copy the certificate.

docker cp redis-node1:/etc/opt/redislabs/proxy_cert.pem ./certificates

Step 3. Generate a New Client Certificate

With Two-Way SSL, you need to have a certificate for the client that will be used by the Redis database proxy to trust the client. In this tutorial, we will use a self-signed certificate generated with OpenSSL. We will be creating a certificate for an application named app_001. Please note that you can create as many certificates as you want, or reuse this one for all servers/applications.

Open a terminal and run the following commands:


openssl req \
-nodes \
-newkey rsa:2048 \
-keyout client_key_app_001.pem \
-x509 \
-days 36500 \
-out client_cert_app_001.pem

This command generates a new client key (client_key_app_001.pem) and certificate (client_cert_app_001.pem) with no passphrase.
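
Optionally, you can inspect the generated certificate to double-check its subject and validity dates:

openssl x509 -in client_cert_app_001.pem -noout -subject -dates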

Step 4. Configure the Redis Database

The next step is to take the certificate and add it to the database you want to protect.

Let's copy the certificate and paste it into the Redis Enterprise Web Console.

Copy the certificate to your clipboard:

Mac:

pbcopy < client_cert_app_001.pem

Linux:

 xclip -sel clip < client_cert_app_001.pem

Windows:

clip < client_cert_app_001.pem

Go to the Redis Enterprise Admin Web Console and enable TLS on your database:

  1. Edit the database configuration
  2. Check TLS
  3. Select "Require TLS for All communications"
  4. Check "Enforce client authentication"
  5. Paste the certificate in the text area
  6. Click the Save button to save the certificate
  7. Click the Update button to save the configuration.

Security Configuration

The database is now protected, and it is mandatory to use the SSL certificate to connect to it.

redis-cli -p 12000 -a secretdb01 INFO SERVER
(error) ERR unencrypted connection is prohibited

Step 5. Connect to the Database using the Certificate

In all these examples, you will be using a "self-signed" certificate, so that you don't need to check the validity of the hostname. You should adapt the connections/TLS information based on your certificate configuration.

Step 5.1 Using Redis-CLI

To connect to an SSL-protected database using redis-cli, you have to use stunnel.

Create a stunnel.conf file with the following content:

cert = /path_to/certificates/client_cert_app_001.pem
key = /path_to/certificates/client_key_app_001.pem
cafile = /path_to/certificates/proxy_cert.pem
client = yes

[redislabs]
accept = 127.0.0.1:6380
connect = 127.0.0.1:12000

Start stunnel using the following command:

stunnel ./stunnel.conf

This will start a process that listens on port 6380 and is used as a proxy to the Redis Enterprise database on port 12000.

redis-cli -p 6380 -a secretdb01 INFO SERVER

Step 5.2 Using Python

Using Python, you have to set the SSL connection parameters:

#!/usr/local/bin/python3

import redis
import pprint

try:
    r = redis.StrictRedis(
        password='secretdb01',
        decode_responses=True,
        host='localhost',
        port=12000,
        ssl=True,
        ssl_keyfile='./client_key_app_001.pem',
        ssl_certfile='./client_cert_app_001.pem',
        ssl_cert_reqs='required',
        ssl_ca_certs='./proxy_cert.pem',
    )

    info = r.info()
    pprint.pprint(info)

except Exception as err:
    print("Error connecting to Redis: {}".format(err))

Step 5.3 Using Node.js

For Node Redis, use the TLS library to configure the client connection:

var redis = require('redis');
var tls = require('tls');
var fs = require('fs');

var ssl = {
  key: fs.readFileSync(
    '../certificates/client_key_app_001.pem',
    {encoding: 'ascii'},
  ),
  cert: fs.readFileSync(
    '../certificates/client_cert_app_001.pem',
    {encoding: 'ascii'},
  ),
  ca: [fs.readFileSync('../certificates/proxy_cert.pem', {encoding: 'ascii'})],
  checkServerIdentity: () => {
    return null;
  },
};

var client = redis.createClient(12000, '127.0.0.1', {
  password: 'secretdb01',
  tls: ssl,
});

client.info('SERVER', function (err, reply) {
  console.log(reply);
});

More information in the documentation "Using Redis with Node.js".

Step 5.4 Using Java

In Java, to be able to connect using SSL, you have to install all the certificates in the Java environment using the keytool utility.

Create a keystore file that stores the key and certificate you have created earlier:

openssl pkcs12 -export \
-in ./client_cert_app_001.pem \
-inkey ./client_key_app_001.pem \
-out client-keystore.p12 \
-name "APP_01_P12"

As you can see, the keystore is used to store the credentials associated with your client; it will be used later with the -Djavax.net.ssl.keyStore system property in the Java application.
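
Optionally, you can list the keystore entries to verify that the key and certificate were stored as expected (you will be prompted for the export password):

keytool -list -v -keystore ./client-keystore.p12 -storetype pkcs12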

In addition to the keystore, you also have to create a trust store, which is used to store other credentials, in our case the Redis cluster certificate.

Create a trust store file and add the Redis cluster certificate to it:

keytool -genkey \
 -dname "cn=CLIENT_APP_01" \
 -alias truststorekey \
 -keyalg RSA \
 -keystore ./client-truststore.p12 \
 -keypass secret \
 -storepass secret \
 -storetype pkcs12

keytool -import \
 -keystore ./client-truststore.p12 \
 -file ./proxy_cert.pem \
 -alias redis-cluster-crt

The truststore will be used later with the -Djavax.net.ssl.trustStore system property in the Java application.

You can now run the Java application with the following environment variables:

java -Djavax.net.ssl.keyStore=/path_to/certificates/java/client-keystore.p12 \
-Djavax.net.ssl.keyStorePassword=secret \
-Djavax.net.ssl.trustStore=/path_to/certificates/java/client-truststore.p12 \
-Djavax.net.ssl.trustStorePassword=secret \
-jar MyApp.jar

For this example, and for simplicity, I will hard-code these properties in the Java code itself:


import redis.clients.jedis.Jedis;
import java.net.URI;

public class SSLTest {

    public static void main(String[] args) {

        System.setProperty("javax.net.ssl.keyStore", "/path_to/certificates/client-keystore.p12");
        System.setProperty("javax.net.ssl.keyStorePassword", "secret");

        System.setProperty("javax.net.ssl.trustStore", "/path_to/certificates/client-truststore.p12");
        System.setProperty("javax.net.ssl.trustStorePassword", "secret");

        URI uri = URI.create("rediss://127.0.0.1:12000");

        Jedis jedis = new Jedis(uri);
        jedis.auth("secretdb01");

        System.out.println(jedis.info("SERVER"));
        jedis.close();
    }

}
  • At the top of main, the system properties are set to point to the keystore and trust store (this should be externalized)
  • The Redis URL starts with rediss (with two s's) to indicate that the connection should be encrypted
  • jedis.auth sets the database password

More information in the documentation "Using Redis with Java".

Conclusion

In this article, you learnt how to:

  • Retrieve the Redis Server certificate
  • Generate a client certificate
  • Protect your database to enforce Transport Layer Security (TLS) with two-way authentication
  • Connect to the database from redis-cli, Python, Node.js, and Java

References

How to build a Shopping cart app using NodeJS and Redis

The most interesting part, at least for now, is located in the src directory (the directory structure is shown below):

The main.js file is the main JavaScript file of the application, which will load all common elements and call the App.vue main screen. The App.vue file contains the HTML, CSS, and JavaScript for a specific page or template. As an entry point for the application, this part is shared by all screens by default, so it is a good place to write the notification-client piece in this file. The public/index.html is the static entry point from where the DOM will be loaded.

Directory Structure:

% tree
.
├── App.vue
├── assets
│ ├── RedisLabs_Illustration.svg
│ └── products
│ ├── 1f1321bb-0542-45d0-9601-2a3d007d5842.jpg
│ ├── 42860491-9f15-43d4-adeb-0db2cc99174a.jpg
│ ├── 63a3c635-4505-4588-8457-ed04fbb76511.jpg
│ ├── 6d6ca89d-fbc2-4fc2-93d0-6ee46ae97345.jpg
│ ├── 97a19842-db31-4537-9241-5053d7c96239.jpg
│ ├── e182115a-63d2-42ce-8fe0-5f696ecdfba6.jpg
│ ├── efe0c7a3-9835-4dfb-87e1-575b7d06701a.jpg
│ ├── f5384efc-eadb-4d7b-a131-36516269c218.jpg
│ ├── f9a6d214-1c38-47ab-a61c-c99a59438b12.jpg
│ └── x341115a-63d2-42ce-8fe0-5f696ecdfca6.jpg
├── components
│ ├── Cart.vue
│ ├── CartItem.vue
│ ├── CartList.vue
│ ├── Info.vue
│ ├── Product.vue
│ ├── ProductList.vue
│ └── ResetDataBtn.vue
├── config
│ └── index.js
├── main.js
├── plugins
│ ├── axios.js
│ └── vuetify.js
├── store
│ ├── index.js
│ └── modules
│ ├── cart.js
│ └── products.js
└── styles
└── styles.scss

8 directories, 27 files

In the client directory, under the subdirectory src, open the file App.vue. You will see the below content:

<template>
  <v-app>
    <v-container>
      <div class="my-8 d-flex align-center">
        <div class="pa-4 rounded-lg red darken-1">
          <v-icon color="white" size="45">mdi-cart-plus</v-icon>
        </div>
        <h1 class="ml-6 font-weight-regular">Shopping Cart demo</h1>
      </div>
    </v-container>

    <v-container>
      <v-row>
        <v-col cols="12" sm="7" md="8">
          <info />
          <product-list :products="products" />
        </v-col>
        <v-col cols="12" sm="5" md="4" class="d-flex flex-column">
          <cart />
          <reset-data-btn class="mt-6" />
        </v-col>
      </v-row>

      <v-footer class="mt-12 pa-0">
        © Copyright 2021 | All Rights Reserved Redis
      </v-footer>
    </v-container>
  </v-app>
</template>

<script>
import { mapGetters, mapActions } from 'vuex';
import Cart from '@/components/Cart';
import ProductList from '@/components/ProductList';
import ResetDataBtn from '@/components/ResetDataBtn.vue';
import Info from '@/components/Info';

export default {
  name: 'App',

  components: {
    ProductList,
    Cart,
    ResetDataBtn,
    Info
  },

  computed: {
    ...mapGetters({
      products: 'products/getProducts'
    })
  },

  async created() {
    await this.fetchProducts();
  },

  methods: {
    ...mapActions({
      fetchProducts: 'products/fetch'
    })
  }
};
</script>

This is client-side code. Here the API returns, among other things, links to icons suitable for use on maps. If you follow the flow through, you’ll see the map markers load those icons directly using the included URLs.

Running/Testing the web client

$ cd client
$ npm run serve

> redis-shopping-cart-client@1.0.0 serve
> vue-cli-service serve

INFO Starting development server...
98% after emitting CopyPlugin

DONE Compiled successfully in 7733ms 7:15:56 AM


App running at:
- Local: http://localhost:8081/
- Network: http://192.168.43.81:8081/

Note that the development build is not optimized.
To create a production build, run npm run build.

Let us click on the first item “256GB Pendrive” and try to check out this product. Once you add it to the cart, you will see the below output using redis-cli monitor command:

1613320256.801562 [0 172.22.0.1:64420] "json.get" "product:97a19842-db31-4537-9241-5053d7c96239"
1613320256.803062 [0 172.22.0.1:64420] "hget"
...
1613320256.805950 [0 172.22.0.1:64420] "json.set" "product:97a19842-db31-4537-9241-5053d7c96239" "." "{\"id\":\"97a19842-db31-4537-9241-5053d7c96239\",\"name\":\"256BG Pendrive\",\"price\":\"60.00\",\"stock\":1}"
1613320256.807792 [0 172.22.0.1:64420] "set" "sess:Ii9njXZd6zeUViL3tKJimN5zU7Samfze"
...
1613320256.823055 [0 172.22.0.1:64420] "scan" "0" "MATCH" "product:*"
...
1613320263.232527 [0 172.22.0.1:64420] "hgetall" "cart:bdee1606395f69985e8f8e01d3ada8c4"
1613320263.233752 [0 172.22.0.1:64420] "set" "sess:gXk5K9bobvrR790-HFEoi3bQ2kP9YmjV" "{\"cookie\":{\"originalMaxAge\":10800000,\"expires\":\"2021-02-14T19:31:03.233Z\",\"httpOnly\":true,\"path\":\"/\"},\"cartId\":\"bdee1606395f69985e8f8e01d3ada8c4\"}" "EX" "10800"
1613320263.240797 [0 172.22.0.1:64420] "scan" "0" "MATCH" "product:*"
1613320263.241908 [0 172.22.0.1:64420] "scan" "22" "MATCH" "product:*"

"{\"cookie\":{\"originalMaxAge\":10800000,\"expires\":\"2021-02-14T19:31:03.254Z\",\"httpOnly\":true,\"path\":\"/\"},\"cartId\":\"4bc231293c5345370f8fab83aff52cf3\"}" "EX" "10800"

Shopping Cart

Conclusion

Storing shopping cart data in Redis is a good idea because it lets you retrieve the data very fast at any time and persist it if needed. Compared to cookies that store the entire shopping cart data in the session, which is bloated and relatively slow in operation, storing the shopping cart data in Redis speeds up the cart’s read and write performance, thereby improving the user experience.

Reference


How to Build a Social Network Application using Redis Stack and NodeJS


Profile picture for Julian Mateu
Author:
Julian Mateu, Sr. Backend Software Engineer at Globality, Inc.
Profile picture for Manuel Aguirre
Author:
Manuel Aguirre, Backend Engineer at Baseline Spain

image

In this blog post we’ll build a social network application using Redis Stack and NodeJS. This is the idea that we used for our app Skillmarket.

The goal of the application is to match users with complementary skills. It will allow users to register and provide some information about themselves, like location, areas of expertise, and interests. Using search in Redis Stack, it will match two users who are geographically close and have complementary areas of expertise and interests, e.g., one of them knows French and wants to learn guitar, and the other knows guitar and wants to learn French.

The full source code of our application can be found on GitHub (note that we used some features like FT.ADD which are now deprecated):

We will be using a more condensed version of the backend which can be found in the Skillmarket Blogpost GitHub repo.

Refer to the official tutorial for more information about search in Redis Stack.

Getting Familiar with search in Redis Stack

Launching search in Redis Stack in a Docker container

Let’s start by launching Redis from the Redis Stack image using Docker:

docker run -d --name redis redis/redis-stack:latest

Here we use the docker run command to start the container and pull the image if it is not present. The -d flag tells docker to launch the container in the background (detached mode). We provide a name with --name redis which will allow us to refer to this container with a friendly name instead of the hash or the random name docker will assign to it.

Finally, redis/redis-stack:latest tells Docker to use the latest version of the redis/redis-stack image.

Once the image starts, we can use docker exec to launch a terminal inside the container, using the -it flag (interactive tty) and specifying the redis name provided before when creating the container, and the bash command:

docker exec -it redis bash

Once inside the container, let’s launch a redis-cli instance to familiarize ourselves with the CLI:

redis-cli

You will notice the prompt now indicates we’re connected to 127.0.0.1:6379

Creating Users

We’ll use a Hash as the data structure to store information about our users. This will be a proof of concept, so our application will only use Redis as the data store. For a real life scenario, it would probably be better to have a primary data store which is the authoritative source of user data, and use Redis as the search index which can be used to speed up searches.

In a nutshell, you can think of a hash as a key/value store where the key can be any string we want, and the values are a document with several fields. It’s common practice to use the hash to store many different types of objects, so they can be prefixed with their type, and a key takes the form of "object_type:id".

An index will then be used on this hash data structure to efficiently search for values of given fields. The following diagram, taken from the search docs, exemplifies this with a database for movies:

alt_text

Use the help @hash command (or refer to the documentation) to get a list of commands that can be used to manipulate hashes. To get help for a single command, like HSET let’s type help HSET:

127.0.0.1:6379> help hset

HSET key field value [field value ...]
summary: Set the string value of a hash field
since: 2.0.0
group: hash

As we see, we can provide a key and a list of field value pairs.

We’ll create a user in the hash table by using users:id as the key, and we’ll provide the fields expertises, interests and location:

HSET users:1 name "Alice" expertises "piano, dancing" interests "spanish, bowling" location "2.2948552,48.8736537"

HSET users:2 name "Bob" expertises "french, spanish" interests "piano" location "2.2945412,48.8583206"

HSET users:3 name "Charles" expertises "spanish, bowling" interests "piano, dancing" location "-0.124772,51.5007169"

Query to match users

Here we can see the power of the search index, which allows us to query by tags (we provide a list of values, such as interests, and it will return any user whose interests match at least one value in the list), and Geo (we can ask for users whose location is at a given radius in km from a point).

To be able to do this, we have to instruct search to create an index:

FT.CREATE idx:users ON hash PREFIX 1 "users:" SCHEMA interests TAG expertises TAG location GEO

We use the FT.CREATE command to create a full text search index named idx:users. We specify ON hash to indicate that we’re indexing the hash table, and provide PREFIX 1 "users:" to indicate that we should index any document whose key starts with the prefix “users:”. Finally we indicate the SCHEMA of the index by providing a list of fields to index, and their type.

Finally, we can query the index using the FT.SEARCH command (see the query syntax reference):

127.0.0.1:6379> FT.SEARCH idx:users "@interests:{dancing|piano} @expertises:{spanish|bowling} @location:[2.2948552 48.8736537 5 km]"
1) (integer) 1
2) "users:2"
3) 1) "name"
2) "Bob"
3) "expertises"
4) "french, spanish"
5) "interests"
6) "piano"
7) "location"
8) "2.2945412,48.8583206"

In this case we’re looking for matches for Alice, so we use her expertises in the interests field of the query, and her interests in the expertises field. We also search for users in a 5km radius from her location, and we get Bob as a match.

If we expand the search radius to 500km we’ll also see that Charles is returned:

127.0.0.1:6379> FT.SEARCH idx:users "@interests:{dancing|piano} @expertises:{spanish|bowling} @location:[2.2948552 48.8736537 500 km]"
1) (integer) 2
2) "users:3"
3) 1) "name"
2) "Charles"
3) "expertises"
4) "spanish, bowling"
5) "interests"
6) "piano, dancing"
7) "location"
8) "-0.124772,51.5007169"
4) "users:2"
5) 1) "name"
2) "Bob"
3) "expertises"
4) "french, spanish"
5) "interests"
6) "piano"
7) "location"
8) "2.2945412,48.8583206"

Cleaning Up

We can now remove the docker instance and move on to building the web application, running the following command from outside the instance:

 docker rm -f redis

Building a minimal backend in Typescript

After understanding how the index works, let’s build a minimal backend API in NodeJS that will allow us to create a user, and query for matching users.

note

This is just an example, and we’re not providing proper validation or error handling, nor other features required for the backend (e.g. authentication).

Redis client

We’ll use the node-redis package to create a client:

const {
  REDIS_PORT = 6379,
  REDIS_HOST = 'localhost',
} = process.env;

const client: RediSearchClient = createClient({
  port: Number(REDIS_PORT),
  host: REDIS_HOST,
});

All the functions in the library use callbacks, but we can use promisify to enable the async/await syntax:

client.hgetallAsync = promisify(client.hgetall).bind(client);
client.hsetAsync = promisify(client.hset).bind(client);
client.ft_createAsync = promisify(client.ft_create).bind(client);
client.ft_searchAsync = promisify(client.ft_search).bind(client);

Finally, let’s define a function to create the user index, as we did before in the CLI example:

async function createUserIndex() {
  client.ft_createAsync(
    'idx:users',
    ['ON', 'hash', 'PREFIX', '1', 'users:', 'SCHEMA', 'interests', 'TAG', 'expertises', 'TAG', 'location', 'GEO']
  );
}

User controller

Let’s define the functions that the controller will use to expose a simple API on top of Redis. We’ll define 3 functions:

  • findUserById(userId)
  • createUser(user)
  • findMatchesForUser(user)

But first let’s define the model we’ll use for the users:

interface Location {
  latitude: number;
  longitude: number;
};

interface User {
  id?: string;
  name: string;
  interests: string[];
  expertises: string[];
  location: Location
};

Let’s start with the function to create a user from the model object:

async function createUser(user: User): Promise<string> {
  const id = uuid();
  redisearchClient.hsetAsync(`users:${id}`, _userToSetRequestString(user));
  return id;
}

function _userToSetRequestString(user: User): string[] {
  const { id, location, interests, expertises, ...fields } = user;
  let result = Object.entries(fields).flat();
  result.push('interests', interests.join(', '));
  result.push('expertises', expertises.join(', '));
  result.push('location', `${location.longitude},${location.latitude}`);
  return result;
}
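
As an illustrative usage example (not part of the original controller), creating the same Alice record as in the earlier CLI example would look like this inside an async function:

const aliceId = await createUser({
  name: 'Alice',
  expertises: ['piano', 'dancing'],
  interests: ['spanish', 'bowling'],
  location: { longitude: 2.2948552, latitude: 48.8736537 },
});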

We will create a UUID for the user, and then transform the TAG and GEO fields to the Redis format. Here’s an example of how these two formats look:

my image

Let’s now look at the logic to retrieve an existing user from the Hash table using HGETALL:

async function findUserById(userId: string): Promise<User> {
  const response = await redisearchClient.hgetallAsync(`users:${userId}`);
  if (!response) {
    throw new Error('User Not Found');
  }
  return _userFromFlatEntriesArray(userId, Object.entries(response).flat());
}

function _userFromFlatEntriesArray(id: string, flatEntriesArray: any[]): User {
  let user: any = {};

  // The flat entries array contains all keys and values as elements in an array, e.g.:
  // [key1, value1, key2, value2]
  for (let j = 0; j < flatEntriesArray.length; j += 2) {
    let key: string = flatEntriesArray[j];
    let value: string = flatEntriesArray[j + 1];
    user[key] = value;
  }

  const location: string[] = user.location.split(',');
  user.location = { longitude: Number(location[0]), latitude: Number(location[1]) };
  user.expertises = user.expertises.split(', ');
  user.interests = user.interests.split(', ');

  return { id, ...user };
}

Here we have the inverse logic, where we want to split the TAG and GEO fields into a model object. There’s also the fact that HGETALL returns the field names and values in an array, and we need to build the model object from that.

Let’s finally take a look at the logic to find matches for a given user:

async function findMatchesForUser(user: User, radiusKm: number): Promise<User[]> {
  const allMatches: User[] = await _findMatches(user.interests, user.expertises, user.location, radiusKm);
  return allMatches.filter(u => u.id !== user.id);
}

async function _findMatches(expertises: string[], interests: string[], location: Location, radiusKm: number): Promise<User[]> {
  let query = `@interests:{${interests.join('|')}}`;
  query += ` @expertises:{${expertises.join('|')}}`;
  query += ` @location:[${location.longitude} ${location.latitude} ${radiusKm} km]`;

  const response = await redisearchClient.ft_searchAsync('idx:users', query);

  return _usersFromSearchResponseArray(response);
}

function _usersFromSearchResponseArray(response: any[]): User[] {
  let users = [];

  // The search response is an array where the first element indicates the number of results, and then
  // the array contains all matches in order, one element is the key and the next is the object, e.g.:
  // [2, key1, object1, key2, object2]
  for (let i = 1; i <= 2 * response[0]; i += 2) {
    const user: User = _userFromFlatEntriesArray(response[i].replace('users:', ''), response[i + 1]);
    users.push(user);
  }

  return users;
}

Here we swap interests and expertises to find the complementary skill set, and we build the query that we used previously in the CLI example. We finally call the FT.SEARCH function and build the model objects from the response, which comes as an array. Results are filtered to exclude the current user from the matches list.
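
Continuing the illustrative example from above, finding matches for Alice within a 5 km radius would be:

const alice = await findUserById(aliceId);
const matches = await findMatchesForUser(alice, 5); // expected to contain Bob, as in the CLI example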

Web API

Finally, we can build a trivial web API using express, exposing a POST /users endpoint to create a user, a GET /users/:userId endpoint to retrieve a user, and a GET /users/:userId/matches endpoint to find matches for the given user (the desired radiusKm can be optionally specified as a query parameter)

app.post('/users', async (req, res) => {
  const user: User = req.body;

  if (!user || !user.name || !user.expertises || !user.interests || user.location.latitude === undefined || user.location.longitude === undefined) {
    res.status(400).send('Missing required fields');
  } else {
    const userId = await userController.createUser(user);
    res.status(200).send(userId);
  }
});

app.get("/users/:userId", async (req, res) => {
  try {
    const user: User = await userController.findUserById(req.params.userId);
    res.status(200).send(user);
  } catch (e) {
    res.status(404).send();
  }
});

app.get("/users/:userId/matches", async (req, res) => {
  try {
    const radiusKm: number = Number(req.query.radiusKm) || 500;
    const user: User = await userController.findUserById(req.params.userId);
    const matches: User[] = await userController.findMatchesForUser(user, radiusKm);
    res.status(200).send(matches);
  } catch (e) {
    console.log(e);
    res.status(404).send();
  }
});

Full code example

The code used in this blog post can be found in the GitHub repo. The backend, together with Redis, can be launched using docker compose:

 docker compose up -d --build

The backend API will be exposed on port 8080. We can see the logs with docker compose logs, and use a client to query it. Here’s an example using httpie:

http :8080/users \
name="Alice" \
expertises:='["piano", "dancing"]' \
interests:='["spanish", "bowling"]' \
location:='{"longitude": 2.2948552, "latitude": 48.8736537}'

----------
HTTP/1.1 200 OK
Connection: keep-alive
Content-Length: 36
Content-Type: text/html; charset=utf-8
Date: Mon, 01 Nov 2021 05:24:52 GMT
ETag: W/"24-dMinMMphAGzfWiCs49RBYnyK+r8"
Keep-Alive: timeout=5
X-Powered-By: Express

03aef405-ef37-4254-ab3c-a5ddfbc4f04e
http ":8080/users/03aef405-ef37-4254-ab3c-a5ddfbc4f04e/matches?radiusKm=15"
HTTP/1.1 200 OK
Connection: keep-alive
Content-Length: 174
Content-Type: application/json; charset=utf-8
Date: Mon, 01 Nov 2021 05:26:29 GMT
ETag: W/"ae-3k2/swmuFaJd7BNHrkgvS/S+h2g"
Keep-Alive: timeout=5
X-Powered-By: Express
[
{
"expertises": [
"french",
" spanish"
],
"id": "58e81f09-d9fa-4557-9b8f-9f48a9cec328",
"interests": [
"piano"
],
"location": {
"latitude": 48.8583206,
"longitude": 2.2945412
},
"name": "Bob"
}
]

Finally cleanup the environment:

docker compose down --volumes --remove-orphans

References

Redis Launchpad

How to use Redis for Cache Prefetching Strategy


Profile picture for Prasan Kumar
Author:
Prasan Kumar, Technical Solutions Developer at Redis
Profile picture for Will Johnston
Author:
Will Johnston, Developer Growth Manager at Redis

GITHUB CODE

Below are the commands to clone the source code (frontend and backend) for the application used in this tutorial

git clone https://github.com/redis-developer/ebook-speed-mern-frontend.git

git clone https://github.com/redis-developer/ebook-speed-mern-backend.git

What is cache prefetching?

Cache prefetching is a technique used in database management systems (DBMS) to improve query performance by anticipating and fetching data from the storage subsystem before it is explicitly requested by a query.

There are three main strategies for cache prefetching:

  1. Sequential prefetching: This approach anticipates that data will be accessed in a sequential manner, such as when scanning a table or index. It prefetches the next set of data blocks or pages in the sequence to ensure they are available in cache when needed.
  2. Prefetching based on query patterns: Some database systems can analyze past query patterns to predict which data is likely to be accessed in the future. By analyzing these patterns, the DBMS can prefetch relevant data and have it available in cache when a similar query is executed.
  3. Prefetching based on data access patterns: In some cases, data access patterns can be derived from the application logic or schema design. By understanding these patterns, the database system can prefetch data that is likely to be accessed soon.

This tutorial will cover the third strategy, prefetching based on data access patterns.

Imagine you're building a movie streaming platform. You need to be able to provide your users with a dashboard that allows them to quickly find the movies they want to watch. You have an extensive database filled with movies, and you have them categorized by things like country of origin, genre, language, etc. This data changes infrequently, and is regularly referenced all over your app and by other data. This kind of data that is long-lived and changes infrequently is called "master data."

One ongoing developer challenge is to swiftly create, read, update, and delete master data. You might store your master data in a system of record like a SQL database or document database, and then use Redis as a cache to speed up lookups for that data. Then, when an application requests master data, instead of coming from the system of record, the master data is served from Redis. This is called the "master data-lookup" pattern.

From a developer's point of view, "master data lookup" refers to the process by which master data is accessed in business transactions, in application setup, and any other way that software retrieves the information. Examples of master data lookup include fetching data for user interface (UI) elements (such as drop-down dialogs, select values, multi-language labels), fetching constants, user access control, theme, and other product configuration.

Below you will find a diagram of the data flow for prefetching master data using Redis with MongoDB as the system of record.

pattern

The steps involved in fetching data are as follows:

  1. Read the master data from MongoDB on application startup and store a copy of the data in Redis. This pre-caches the data for fast retrieval. Use a script or a cron job to regularly copy the latest master data to Redis.
  2. The application requests master data.
  3. Instead of MongoDB serving the data, the master data will be served from Redis.

Why you should use Redis for cache prefetching

  1. Serve prefetched data at speed: By definition, nearly every application requires access to master or other common data. Pre-caching such frequent data with Redis delivers it to users at high speed.
  2. Support massive tables: Master tables often have millions of records. Searching through them can cause performance bottlenecks. Use Redis to perform real-time search on the large tables to increase performance with sub-millisecond response.
  3. Postpone expensive hardware and software investments: Defer costly infrastructure enhancements by using Redis. Get the performance and scaling benefits without asking the CFO to write a check.
tip

If you use Redis Enterprise, cache prefetching is easier due to its support for JSON and search. You also get additional features such as real-time performance, high scalability, resiliency, and fault tolerance. You can also call upon high-availability features such as Active-Active geo-redundancy.

Cache prefetching in a NodeJS application with Redis and MongoDB

Demo application

The demo application used in the rest of this tutorial showcases a movie application with basic create, read, update, and delete (CRUD) operations. demo-01

The movie application dashboard contains a search section at the top and a list of movie cards in the middle. The floating plus icon displays a pop-up when the user selects it, permitting the user to enter new movie details. The search section has a text search bar and a toggle link between text search and basic (that is, form-based) search. Each movie card has edit and delete icons, which are displayed when a mouse hovers over the card.

GITHUB CODE

Below are the commands to clone the source code (frontend and backend) for the application used in this tutorial

git clone https://github.com/redis-developer/ebook-speed-mern-frontend.git

git clone https://github.com/redis-developer/ebook-speed-mern-backend.git

Certain fields used in the demo application serve as master data, including movie language, country, genre, and ratings. They are master data because they are required for almost every application transaction. For example, consider the pop-up dialog (seen below) that appears when a user who wants to add a new movie clicks the plus icon in the movie application. The pop-up includes drop-down menus for both country and language. In this case, Redis stores and provides the values.

demo-03

Prefetching data with Redis and MongoDB

The code snippet below is used to prefetch MongoDB JSON documents and store them in Redis (as JSON) using the Redis OM for Node.js library.

async function insertMasterCategoriesToRedis() {
  ...
  const _dataArr = await getMasterCategories(); //from MongoDB
  const repository = MasterCategoryRepo.getRepository();

  if (repository && _dataArr && _dataArr.length) {
    for (const record of _dataArr) {
      const entity = repository.createEntity(record);
      entity.categoryTag = [entity.category]; //for tag search
      //adds JSON to Redis
      await repository.save(entity);
    }
  }
  ...
}

async function getMasterCategories() {
  //fetching data from MongoDB
  ...
  db.collection("masterCategories").find({
    statusCode: {
      $gt: 0,
    },
    category: {
      $in: ["COUNTRY", "LANGUAGE"],
    },
  });
  ...
}

You can also check RedisInsight to verify that JSON data is inserted, as seen below:

Redis-json
tip

RedisInsight is the free Redis GUI for viewing data in Redis. Click here to download.

Querying prefetched data from Redis

Prior to prefetching with Redis, the application queried the system of record (MongoDB) directly to retrieve the movie's country and language values. As more people started using the application, the database became overloaded with queries, and the application grew slow and unresponsive. To solve this problem, the application was modified to use Redis to store the master data. The code snippet below shows how the application queries Redis for the master data, specifically the country and language values for the drop-down menus:

//*** With Redis ***
//*** Redis OM Node query ***
function getMasterCategories() {
  ...
  masterCategoriesRepository
    .search()
    .where('statusCode')
    .gt(0)
    .and('categoryTag')
    .containsOneOf('COUNTRY', 'LANGUAGE');
  ...
}

Ready to use Redis for cache prefetching?

In this tutorial you learned how to use Redis for cache prefetching with a "master data lookup" example. While this is one way Redis is used in an application, it's possible to incrementally adopt Redis wherever needed with other caching strategies/patterns. For more resources on the topic of caching, check out the links below:

Additional resources

- + \ No newline at end of file diff --git a/howtos/solutions/caching-architecture/common-caching/caching-movie-app/index.html b/howtos/solutions/caching-architecture/common-caching/caching-movie-app/index.html index 05eb7ea2e8..e943a52566 100644 --- a/howtos/solutions/caching-architecture/common-caching/caching-movie-app/index.html +++ b/howtos/solutions/caching-architecture/common-caching/caching-movie-app/index.html @@ -4,7 +4,7 @@ caching-movie-app | The Home of Redis Developers - + @@ -13,7 +13,7 @@

caching-movie-app

The demo application used in the rest of this tutorial showcases a movie application with basic create, read, update, and delete (CRUD) operations. demo-01

The movie application dashboard contains a search section at the top and a list of movie cards in the middle. The floating plus icon displays a pop-up when the user selects it, permitting the user to enter new movie details. The search section has a text search bar and a toggle link between text search and basic (that is, form-based) search. Each movie card has edit and delete icons, which are displayed when a mouse hovers over the card.

- + \ No newline at end of file diff --git a/howtos/solutions/caching-architecture/common-caching/redis-gears/index.html b/howtos/solutions/caching-architecture/common-caching/redis-gears/index.html index a2562be19b..e5683eb001 100644 --- a/howtos/solutions/caching-architecture/common-caching/redis-gears/index.html +++ b/howtos/solutions/caching-architecture/common-caching/redis-gears/index.html @@ -4,7 +4,7 @@ redis-gears | The Home of Redis Developers - + @@ -12,7 +12,7 @@

redis-gears

What is RedisGears?

RedisGears is a programmable serverless engine for transaction, batch, and event-driven data processing allowing users to write and run their own functions on data stored in Redis.

Functions can be implemented in different languages, including Python and C, and can be executed by the RedisGears engine in one of two ways:

  1. Batch: triggered by the Run action, execution is immediate and on existing data
  2. Event: triggered by the Register action, execution is triggered by new events and on their data

Some batch type operations RedisGears can do:

  • Run an operation on all keys in the KeySpace, or on keys matching a certain pattern, such as:
    • Prefix all KeyNames with person:
    • Delete all keys whose value is smaller than zero
    • Write all the KeyNames starting with person: to a set
  • Run a set of operations on all (or matched) keys where the output of one operation is the input of another, such as:
    • Find all keys with the prefix person: (assume all of them are of type hash)
    • Increase each user's days_old by 1, then sum them by age group (10-20, 20-30, etc.)
    • Add today's stats to the sorted set of every client, calculate the last 7 days' average, and save the computed result in a string

Some event type operations RedisGears can do:

  • RedisGears can also register event listeners that will trigger a function execution every time a watched key is changed, such as:
    • Listen for all operations on all keys and keep a list of all KeyNames in the KeySpace
    • Listen for DEL operations on keys with the prefix I-AM-IMPORTANT: and asynchronously dump them in a "deleted keys" log file
    • Listen for all HINCRBY operations on the score element of keys with the prefix player: and synchronously update a user's level when the score reaches 1000

How do I use RedisGears?

Run the Docker container:

docker run --name redisgears -p 6379:6379 redislabs/redisgears:latest

For a very simple example that lists all keys in your Redis database with a prefix of person:, create the following Python script and name it hello_gears.py:

gb = GearsBuilder()
gb.run('person:*')

Execute your function:

docker exec -i redisgears redis-cli RG.PYEXECUTE "`cat hello_gears.py`"

Using gears-cli

The gears-cli tool provides an easier way to execute RedisGears functions, especially if you need to pass some parameters too.

It's written in Python and can be installed with pip:

pip install gears-cli
gears-cli hello_gears.py REQUIREMENTS rgsync

Usage:

gears-cli --help
usage: gears-cli [-h] [--host HOST] [--port PORT]
[--requirements REQUIREMENTS] [--password PASSWORD] path [extra_args [extra_args ...]]

RedisGears references

- + \ No newline at end of file diff --git a/howtos/solutions/caching-architecture/common-caching/source-code-movie-app/index.html b/howtos/solutions/caching-architecture/common-caching/source-code-movie-app/index.html index 9a020b0d5c..e98c9304f7 100644 --- a/howtos/solutions/caching-architecture/common-caching/source-code-movie-app/index.html +++ b/howtos/solutions/caching-architecture/common-caching/source-code-movie-app/index.html @@ -4,7 +4,7 @@ source-code-movie-app | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/howtos/solutions/caching-architecture/common-caching/write-behind-vs-write-through/index.html b/howtos/solutions/caching-architecture/common-caching/write-behind-vs-write-through/index.html index 6a6368acd9..b60adb12f1 100644 --- a/howtos/solutions/caching-architecture/common-caching/write-behind-vs-write-through/index.html +++ b/howtos/solutions/caching-architecture/common-caching/write-behind-vs-write-through/index.html @@ -4,7 +4,7 @@ write-behind-vs-write-through | The Home of Redis Developers - + @@ -12,7 +12,7 @@

write-behind-vs-write-through

There are two related write patterns, and the main differences between them are as follows:

Write Behind | Write Through
Syncs data asynchronously | Syncs data synchronously/immediately
Data between the cache and the system of record (database) is inconsistent for a short time | Data between the cache and the system of record (database) is always consistent
- + \ No newline at end of file diff --git a/howtos/solutions/caching-architecture/write-behind/index.html b/howtos/solutions/caching-architecture/write-behind/index.html index de9b91bea6..b5588d3a8b 100644 --- a/howtos/solutions/caching-architecture/write-behind/index.html +++ b/howtos/solutions/caching-architecture/write-behind/index.html @@ -4,7 +4,7 @@ How to use Redis for Write-behind Caching | The Home of Redis Developers - + @@ -15,7 +15,7 @@ demo-01

The movie application dashboard contains a search section at the top and a list of movie cards in the middle. The floating plus icon displays a pop-up when the user selects it, permitting the user to enter new movie details. The search section has a text search bar and a toggle link between text search and basic (that is, form-based) search. Each movie card has edit and delete icons, which are displayed when a mouse hovers over the card.

GITHUB CODE

Below are the commands to clone the source code (frontend and backend) for the application used in this tutorial

git clone https://github.com/redis-developer/ebook-speed-mern-frontend.git

git clone https://github.com/redis-developer/ebook-speed-mern-backend.git

To demonstrate this pattern using the movie application, imagine that the user opens the pop-up to add a new movie.

demo-02

Instead of the application immediately storing the data in MongoDB, the application writes the changes to Redis. In the background, RedisGears automatically synchronizes the data with the MongoDB database.

Programming Redis using the write-behind pattern

Developers need to load some code (Python, in our example) to the Redis server before using the write-behind pattern (which syncs data from Redis to MongoDB). The Redis server has a RedisGears module that interprets the Python code and syncs the data from Redis to MongoDB.

Loading the Python code is easier than it sounds. Simply replace the database details in the Python file and then load the file to the Redis server.

Create the Python file (shown below, and available online). Then update the MongoDB connection details, database, collection, and primary key name to sync.

movies-write-behind.py
# Gears Recipe for a single write behind

# import redis gears & mongo db libs
from rgsync import RGJSONWriteBehind, RGJSONWriteThrough
from rgsync.Connectors import MongoConnector, MongoConnection

# change mongodb connection (admin)
# mongodb://usrAdmin:passwordAdmin@10.10.20.2:27017/dbSpeedMernDemo?authSource=admin
mongoUrl = 'mongodb://usrAdmin:passwordAdmin@10.10.20.2:27017/admin'

# MongoConnection(user, password, host, authSource?, fullConnectionUrl?)
connection = MongoConnection('', '', '', '', mongoUrl)

# change MongoDB database
db = 'dbSpeedMernDemo'

# change MongoDB collection & its primary key
movieConnector = MongoConnector(connection, db, 'movies', 'movieId')

# change redis keys with prefix that must be synced with the mongodb collection
RGJSONWriteBehind(GB, keysPrefix='MovieEntity',
                  connector=movieConnector, name='MoviesWriteBehind',
                  version='99.99.99')
What is a RedisGears recipe?

A collection of RedisGears functions, and any dependencies they may have, that implements a high-level functional purpose is called a recipe. Example: the "RGJSONWriteBehind" function in the Python code above.

There are two ways to load that Python file into the Redis server:

  1. Using the gears command-line interface (CLI)

Find more information about the Gears CLI at gears-cli and rgsync.

# install
pip install gears-cli
# If python file is located at “/users/tom/movies-write-behind.py”
gears-cli --host <redisHost> --port <redisPort> --password <redisPassword> run /users/tom/movies-write-behind.py REQUIREMENTS rgsync pymongo==3.12.0
  2. Using RG.PYEXECUTE from the Redis command line.

Find more information at RG.PYEXECUTE.

# Via redis cli
RG.PYEXECUTE 'pythonCode' REQUIREMENTS rgsync pymongo==3.12.0

The RG.PYEXECUTE command can also be executed from Node.js code (consult the sample Node file for more details).
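Here is a hedged sketch of what that could look like with node-redis v4 (the file path and connection URL are illustrative):

import { createClient } from 'redis';
import { readFile } from 'fs/promises';

async function loadWriteBehindRecipe() {
  const client = createClient({ url: 'redis://localhost:6379' });
  await client.connect();

  // Send RG.PYEXECUTE as a raw command, with the recipe's requirements
  const pythonCode = await readFile('movies-write-behind.py', 'utf-8');
  const result = await client.sendCommand([
    'RG.PYEXECUTE',
    pythonCode,
    'REQUIREMENTS',
    'rgsync',
    'pymongo==3.12.0',
  ]);

  console.log(result); // 'OK' on success
  await client.quit();
}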

Find more examples at Redis Gears sync with MongoDB.

Verifying the write-behind pattern using RedisInsight

tip

RedisInsight is the free Redis GUI for viewing data in Redis. Click here to download.

The next step is to verify that RedisGears is syncing data between Redis and MongoDB.

Insert a key starting with the prefix that's specified in the Python file, using the Redis CLI:
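For example, a hypothetical movie JSON document could be inserted like this (the field values are illustrative; the recipe only cares about the MovieEntity key prefix):

JSON.SET MovieEntity:101 $ '{"movieId":101,"title":"The Matrix","genre":"SciFi"}'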

redis-insight

Next, confirm that the JSON is inserted in MongoDB too.

mongo-compass

You can also check RedisInsight to verify that the data is piped in via Streams for its consumers (like RedisGears).

redis-insight-stream

How does all that work with the demo application? Below is a code snippet to insert a movie. Once data is written to Redis, RedisGears automatically synchronizes it to MongoDB.

BEFORE (using MongoDB)
...
//(Node mongo query)
if (movie) {
  //insert movie to MongoDB
  await db.collection("movies").insertOne(movie);
}
...
AFTER (using Redis)
...
//(Redis OM Node query)
if (movie) {
  const entity = moviesRepository.createEntity(movie);
  //insert movie to Redis
  await moviesRepository.save(entity);
}
...

Ready to use Redis for write-behind caching?

You now know how to use Redis for write-behind caching. It's possible to incrementally adopt Redis wherever needed with different strategies/patterns. For more resources on the topic of caching, check out the links below:

Additional resources

- + \ No newline at end of file diff --git a/howtos/solutions/caching-architecture/write-through/index.html b/howtos/solutions/caching-architecture/write-through/index.html index 23e0b4027a..e1ccf9aade 100644 --- a/howtos/solutions/caching-architecture/write-through/index.html +++ b/howtos/solutions/caching-architecture/write-through/index.html @@ -4,7 +4,7 @@ How to use Redis for Write through caching strategy | The Home of Redis Developers - + @@ -15,7 +15,7 @@ So you need a way of quickly providing strong consistency of user data. In such situation, What you need is called the "write-through pattern."

With the write-through pattern, every time an application writes data to the cache, it also updates the records in the database. Unlike write-behind, in this pattern the thread waits until the write to the database is also completed.

Below is a diagram of the write-through pattern for the application:

write-through-pattern using Redis in a movie streaming application

The pattern works as follows:

  1. The application reads and writes data to Redis.
  2. Redis syncs any changed data to the PostgreSQL database synchronously/immediately.

Note: the Redis server is blocked until a response from the main database is received.

There are two related write patterns, and the main differences between them are as follows:

Write Behind | Write Through
Syncs data asynchronously | Syncs data synchronously/immediately
Data between the cache and the system of record (database) is inconsistent for a short time | Data between the cache and the system of record (database) is always consistent

Learn more about Write behind pattern

Why you should use Redis for write-through caching

Write-through caching with Redis ensures that the (critical data) cache is always up-to-date with the database, providing strong consistency and improving application performance.

Consider the scenarios below from different applications:

  • E-commerce application: In an e-commerce application, write-through caching can be used to ensure consistency of product inventory. Whenever a customer purchases a product, the inventory count should be updated immediately to avoid overselling. Redis can be used to cache the inventory count, and every update to the count can be written through to the database. This ensures that the inventory count in the cache is always up-to-date, and customers are not able to purchase items that are out of stock.

  • Banking application: In a banking application, write-through caching can be used to ensure consistency of account balances. Whenever a transaction is made, the account balance should be updated immediately to avoid overdrafts or other issues. Redis can be used to cache the account balances, and every transaction can be written through to the database. This ensures that the balance in the cache is always up-to-date, and transactions can be processed with strong consistency.

  • Online gaming platform: Suppose you have an online gaming platform where users can play games against each other. With write-through caching, any changes made to a user's score or game state would be saved to the database and also cached in Redis. This ensures that any subsequent reads for that user's score or game state would hit the cache first. This helps to reduce the load on the database and ensures that the game state displayed to users is always up-to-date.

  • Claims Processing System: In an insurance claims processing system, claims data needs to be consistent and up-to-date across different systems and applications. With write-through caching in Redis, new claims data can be written to both the database and Redis cache. This ensures that different applications always have the most up-to-date information about the claims, making it easier for claims adjusters to access the information they need to process claims more quickly and efficiently.

  • Healthcare Applications: In healthcare applications, patient data needs to be consistent and up-to-date across different systems and applications. With write-through caching in Redis, updated patient data can be written to both the database and Redis cache, ensuring that different applications always have the latest patient information. This can help improve patient care by providing accurate and timely information to healthcare providers.

  • Social media application: In a social media application, write-through caching can be used to ensure consistency of user profiles. Whenever a user updates their profile, the changes should be reflected immediately to avoid showing outdated information to other users. Redis can be used to cache the user profiles, and every update can be written through to the database. This ensures that the profile information in the cache is always up-to-date, and users can see accurate information about each other.

Redis programmability for write-through caching using RedisGears

tip

You can skip reading this section if you are already familiar with RedisGears.

What is RedisGears?

RedisGears is a programmable serverless engine for transaction, batch, and event-driven data processing allowing users to write and run their own functions on data stored in Redis.

Functions can be implemented in different languages, including Python and C, and can be executed by the RedisGears engine in one of two ways:

  1. Batch: triggered by the Run action, execution is immediate and on existing data
  2. Event: triggered by the Register action, execution is triggered by new events and on their data

Some batch type operations RedisGears can do:

  • Run an operation on all keys in the KeySpace, or on keys matching a certain pattern, such as:
    • Prefix all KeyNames with person:
    • Delete all keys whose value is smaller than zero
    • Write all the KeyNames starting with person: to a set
  • Run a set of operations on all (or matched) keys where the output of one operation is the input of another, such as:
    • Find all keys with the prefix person: (assume all of them are of type hash)
    • Increase each user's days_old by 1, then sum them by age group (10-20, 20-30, etc.)
    • Add today's stats to the sorted set of every client, calculate the last 7 days' average, and save the computed result in a string

Some event type operations RedisGears can do:

  • RedisGears can also register event listeners that will trigger a function execution every time a watched key is changed, such as:
    • Listen for all operations on all keys and keep a list of all KeyNames in the KeySpace
    • Listen for DEL operations on keys with the prefix I-AM-IMPORTANT: and asynchronously dump them in a "deleted keys" log file
    • Listen for all HINCRBY operations on the score element of keys with the prefix player: and synchronously update a user's level when the score reaches 1000

How do I use RedisGears?

Run the Docker container:

docker run --name redisgears -p 6379:6379 redislabs/redisgears:latest

For a very simple example that lists all keys in your Redis database with a prefix of person:, create the following Python script and name it hello_gears.py:

gb = GearsBuilder()
gb.run('person:*')

Execute your function:

docker exec -i redisgears redis-cli RG.PYEXECUTE "`cat hello_gears.py`"

Using gears-cli

The gears-cli tool provides an easier way to execute RedisGears functions, especially if you need to pass some parameters too.

It's written in Python and can be installed with pip:

pip install gears-cli
gears-cli hello_gears.py REQUIREMENTS rgsync

Usage:

gears-cli --help
usage: gears-cli [-h] [--host HOST] [--port PORT]
[--requirements REQUIREMENTS] [--password PASSWORD] path [extra_args [extra_args ...]]

RedisGears references

Programming Redis using the write-through pattern

For our sample code, we will demonstrate writing users to Redis and then writing through to PostgreSQL. Use the docker-compose.yml file below to set up the required environment:

docker-compose.yml
version: '3.9'
services:
  redis:
    container_name: redis
    image: 'redislabs/redismod:latest'
    ports:
      - 6379:6379
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
  postgres:
    image: postgres
    restart: always
    environment:
      POSTGRES_USER: root
      POSTGRES_PASSWORD: password
      POSTGRES_DB: example
  adminer:
    image: adminer
    restart: always
    ports:
      - 8080:8080

To run the docker-compose file, run the following command:

$ docker compose up -d

This will create a Redis server, a PostgreSQL server, and an Adminer server. Adminer is a web-based database management tool that allows you to view and edit data in your database.

Next, open your browser to http://localhost:8080/?pgsql=postgres&username=root&db=example&ns=public&sql=. You will have to input the password (which is password in the example above),

adminer-login

then you will be taken to a SQL command page. Run the following SQL command to create a table:

users.sql
CREATE TABLE users (
  id SERIAL PRIMARY KEY,
  username VARCHAR(255) UNIQUE NOT NULL,
  email VARCHAR(255) UNIQUE NOT NULL,
  password_hash VARCHAR(255) NOT NULL,
  first_name VARCHAR(255),
  last_name VARCHAR(255),
  date_of_birth DATE,
  created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
  updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

adminer-table-creation

Developers need to load some code (Python, in our example) to the Redis server before using the write-through pattern (which syncs data from Redis to the system of record). The Redis server has a RedisGears module that interprets the Python code and syncs the data from Redis to the system of record.

Now, we need to create a RedisGears recipe that will write through to the PostgreSQL database. The following Python code will write through to the PostgreSQL database:

write-through.py
from rgsync import RGWriteThrough
from rgsync.Connectors import PostgresConnector, PostgresConnection

'''
Create Postgres connection object
'''
connection = PostgresConnection('root', 'password', 'postgres:5432/example')

'''
Create Postgres users connector
'''
usersConnector = PostgresConnector(connection, 'users', 'id')

usersMappings = {
    'username': 'username',
    'email': 'email',
    'pwhash': 'password_hash',
    'first': 'first_name',
    'last': 'last_name',
    'dob': 'date_of_birth',
    'created_at': 'created_at',
    'updated_at': 'updated_at',
}

RGWriteThrough(GB, keysPrefix='__', mappings=usersMappings,
               connector=usersConnector, name='UsersWriteThrough', version='99.99.99')

Make sure you create the file "write-through.py", because the next instructions will use it. For the purposes of this example, we are showing how to map Redis hash fields to PostgreSQL table columns. The RGWriteThrough function takes in usersMappings, where the keys are the Redis hash fields and the values are the PostgreSQL table columns.

What is a RedisGears recipe?

A collection of RedisGears functions, and any dependencies they may have, that implements a high-level functional purpose is called a recipe. Example: the "RGWriteThrough" function in the Python code above.

The Python file has a few dependencies in order to work. Below is the requirements.txt file that contains the dependencies; create it alongside the "write-through.py" file:

requirements.txt
rgsync
psycopg2-binary
cryptography

There are two ways (gears CLI and RG.PYEXECUTE) to load that Python file into the Redis server:

  1. Using the gears command-line interface (CLI)

Find more information about the Gears CLI at gears-cli and rgsync.

# install
pip install gears-cli

To run our write-through recipe using gears-cli, we need to run the following command:

$ gears-cli run --host localhost --port 6379 write-through.py --requirements requirements.txt

You should get a response that says "OK". That is how you know you have successfully loaded the Python file into the Redis server.

tip

If you are on Windows, we recommend you use WSL to install and use gears-cli.

  2. Using RG.PYEXECUTE from the Redis command line.
# Via redis cli
RG.PYEXECUTE 'pythonCode' REQUIREMENTS rgsync psycopg2-binary cryptography
tip

The RG.PYEXECUTE command can also be executed from Node.js code (consult the sample Node file for more details).

tip

Find more examples in the Redis Gears GitHub repository.

Verifying the write-through pattern using RedisInsight

tip

RedisInsight is the free Redis GUI for viewing data in Redis. Click here to download.

The next step is to verify that RedisGears is syncing data between Redis and PostgreSQL. Note that in our Python file we specified a prefix for the keys. In this case, we specified __ as the prefix, users as the table, and id as the unique identifier. This instructs RedisGears to look for the following key format: __{users:<id>}. Try running the following command in the Redis command line:

hset __{users:1} username john email john@gmail.com pwhash d1e8a70b5ccab1dc2f56bbf7e99f064a660c08e361a35751b9c483c88943d082 first John last Doe dob 1990-01-01 created_at 2023-04-20 updated_at 2023-04-20

redis-hash-insert

Check RedisInsight to verify that the hash value made it into Redis. After RedisGears is done processing the __{users:1} key, it will be deleted from Redis and replaced by the users:1 key. Check RedisInsight to verify that the users:1 key is in Redis.
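You can also verify from the Redis CLI; once the recipe has processed the key, the synced hash should be readable under its new name:

HGETALL users:1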

redis-hash-view

Next, confirm that the user is inserted in PostgreSQL too by opening up the select page in Adminer. You should see the user inserted in the table.

adminer-hash-view

This is how you can use RedisGears to write through to PostgreSQL, and so far we have only added a hash key. You can also update specific hash fields and it will be reflected in your PostgreSQL database. Run the following command to update the username field:

> hset __{users:1} username bar

redis-hash-update

In RedisInsight, verify that the username field is updated

redis-hash-updated-view

Now go into Adminer and check the username field. You should see that it has been updated to bar.

adminer-updated-hash-view

Ready to use Redis for write-through caching?

You now know how to use Redis for write-through caching. It's possible to incrementally adopt Redis wherever needed with different strategies/patterns. For more resources on the topic of caching, check out the links below:

Additional resources

- + \ No newline at end of file diff --git a/howtos/solutions/fraud-detection/common-fraud/source-code-tip/index.html b/howtos/solutions/fraud-detection/common-fraud/source-code-tip/index.html index 3d6ba8727a..eb770fe224 100644 --- a/howtos/solutions/fraud-detection/common-fraud/source-code-tip/index.html +++ b/howtos/solutions/fraud-detection/common-fraud/source-code-tip/index.html @@ -4,7 +4,7 @@ source-code-tip | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/howtos/solutions/fraud-detection/digital-identity-validation/index.html b/howtos/solutions/fraud-detection/digital-identity-validation/index.html index 962a55e894..94d9b57736 100644 --- a/howtos/solutions/fraud-detection/digital-identity-validation/index.html +++ b/howtos/solutions/fraud-detection/digital-identity-validation/index.html @@ -4,7 +4,7 @@ How to Handle Digital Identity Validation Using Redis | The Home of Redis Developers - + @@ -18,7 +18,7 @@ Validation Identity as JSON
Caveat

Even though you may receive a score of "1", this only means the identity matched 100% against the measured properties. We are only measuring digital aspects of the identity, which can be compromised. In a real-world scenario you would want to measure more characteristics, such as location, device type, and session, in addition to other contextual information, for a complete transaction risk score.

E-commerce application frontend using Next.js and Tailwind

The e-commerce microservices application consists of a frontend built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

  • Dashboard: Shows the list of products with search functionality

    redis microservices e-commerce app frontend products page

  • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button redis microservices e-commerce app frontend shopping cart

  • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

    redis microservices e-commerce app frontend order history page

GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

git clone --branch v3.0.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

Building a digital identity validation microservice with Redis

Now, let's go step-by-step through the process of storing, scoring, and validating digital identities using Redis, with some example code. For demo purposes, we are only using a few characteristics of a user's digital identity: IP address, browser fingerprint, and session. In a real-world application you should store more characteristics, such as location, device type, and prior actions taken, for better risk assessment and identity completeness.

Storing digital identities in Redis in a microservices architecture

  1. login service: stores the (user) digital identity as an INSERT_LOGIN_IDENTITY stream entry in Redis
//addLoginToTransactionStream
const userId = 'USR_4e7acc44-e91e-4c5c-9112-bdd99d799dd3'; //from session
const sessionId = 'SES_94ff24a8-65b5-4795-9227-99906a43884e'; //from session
const persona = 'GRANDFATHER'; //from session

const entry: ITransactionStreamMessage = {
  action: TransactionStreamActions.INSERT_LOGIN_IDENTITY,
  logMessage: `[${REDIS_STREAMS.CONSUMERS.IDENTITY}] Digital identity to be stored for the user ${userId}`,
  userId,
  persona,
  sessionId,

  identityBrowserAgent: req.headers['user-agent'],
  identityIpAddress:
    req.headers['x-forwarded-for']?.toString() || req.socket.remoteAddress,
  transactionPipeline: JSON.stringify(TransactionPipelines.LOGIN),
};

const nodeRedisClient = getNodeRedisClient();
const streamKeyName = 'TRANSACTION_STREAM';
const id = '*'; //* = auto generate
await nodeRedisClient.xAdd(streamKeyName, id, entry);
  2. digital identity service: reads the identity from the INSERT_LOGIN_IDENTITY stream
interface ListenStreamOptions {
  streams: {
    streamKeyName: string;
    eventHandlers: {
      [messageAction: string]: IMessageHandler;
    };
  }[];
  groupName: string;
  consumerName: string;
  maxNoOfEntriesToReadAtTime?: number;
}

// Below is some code for how you would use Redis to listen for the stream events:

const listenToStreams = async (options: ListenStreamOptions) => {
  /*
    (A) create consumer group for the stream
    (B) read set of messages from the stream
    (C) process all messages received
    (D) trigger appropriate action callback for each message
    (E) acknowledge individual messages after processing
  */
  const nodeRedisClient = getNodeRedisClient();
  if (nodeRedisClient) {
    const streams = options.streams;
    const groupName = options.groupName;
    const consumerName = options.consumerName;
    const readMaxCount = options.maxNoOfEntriesToReadAtTime || 100;
    const idInitialPosition = '0'; //0 = start, $ = end or any specific id
    const streamKeyIdArr: {
      key: string;
      id: string;
    }[] = [];

    streams.map(async (stream) => {
      LoggerCls.info(
        `Creating consumer group ${groupName} in stream ${stream.streamKeyName}`,
      );

      try {
        // (A) create consumer group for the stream
        await nodeRedisClient.xGroupCreate(
          stream.streamKeyName,
          groupName,
          idInitialPosition,
          {
            MKSTREAM: true,
          },
        );
      } catch (err) {
        LoggerCls.error(
          `Consumer group ${groupName} already exists in stream ${stream.streamKeyName}!`,
        );
      }

      streamKeyIdArr.push({
        key: stream.streamKeyName,
        id: '>', // Next entry ID that no consumer in this group has read
      });
    });

    LoggerCls.info(`Starting consumer ${consumerName}.`);

    while (true) {
      try {
        // (B) read set of messages from different streams
        const dataArr = await nodeRedisClient.xReadGroup(
          commandOptions({
            isolated: true,
          }),
          groupName,
          consumerName,
          //can specify multiple streams in array [{key, id}]
          streamKeyIdArr,
          {
            COUNT: readMaxCount, // Read n entries at a time
            BLOCK: 5, // Block for up to 5 ms if there are no new entries
          },
        );

        // dataArr = [
        //   {
        //     name: 'streamName',
        //     messages: [
        //       {
        //         id: '1642088708425-0',
        //         message: {
        //           key1: 'value1',
        //         },
        //       },
        //     ],
        //   },
        // ];

        // (C) process all messages received
        if (dataArr && dataArr.length) {
          for (let data of dataArr) {
            for (let messageItem of data.messages) {
              const streamKeyName = data.name;

              const stream = streams.find(
                (s) => s.streamKeyName == streamKeyName,
              );

              if (stream && messageItem.message) {
                const streamEventHandlers = stream.eventHandlers;
                const messageAction = messageItem.message.action;
                const messageHandler = streamEventHandlers[messageAction];

                if (messageHandler) {
                  // (D) trigger appropriate action callback for each message
                  await messageHandler(messageItem.message, messageItem.id);
                }
                // (E) acknowledge individual messages after processing
                await nodeRedisClient.xAck(streamKeyName, groupName, messageItem.id);
              }
            }
          }
        } else {
          // LoggerCls.info('No new stream entries.');
        }
      } catch (err) {
        LoggerCls.error('xReadGroup error !', err);
      }
    }
  }
};

// `listenToStreams` listens for events and calls the appropriate callback to further handle the events.
listenToStreams({
  streams: [
    {
      streamKeyName: REDIS_STREAMS.STREAMS.TRANSACTIONS,
      eventHandlers: {
        [TransactionStreamActions.INSERT_LOGIN_IDENTITY]: insertLoginIdentity,
        //...
      },
    },
  ],
  groupName: REDIS_STREAMS.GROUPS.IDENTITY,
  consumerName: REDIS_STREAMS.CONSUMERS.IDENTITY,
});
  3. digital identity service: stores the identity as JSON in Redis
const insertLoginIdentity: IMessageHandler = async (
  message: ITransactionStreamMessage,
  messageId,
) => {
  LoggerCls.info(`Adding digital identity to redis for ${message.userId}`);

  // add login digital identity to redis
  const insertedKey = await addDigitalIdentityToRedis(message);

  //...
};

const addDigitalIdentityToRedis = async (
  message: ITransactionStreamMessage,
) => {
  let insertedKey = '';

  const userId = message.userId;
  const digitalIdentity: IDigitalIdentity = {
    action: message.action,
    userId: userId,
    sessionId: message.sessionId,

    ipAddress: message.identityIpAddress,
    browserFingerprint: crypto
      .createHash('sha256')
      .update(message.identityBrowserAgent)
      .digest('hex'),
    identityScore: message.identityScore ? message.identityScore : '',

    createdOn: new Date(),
    createdBy: userId,
    statusCode: DB_ROW_STATUS.ACTIVE,
  };

  const repository = digitalIdentityRepo.getRepository();
  if (repository) {
    const entity = repository.createEntity(digitalIdentity);
    insertedKey = await repository.save(entity);
  }

  return insertedKey;
};

Validating digital identities using Redis in a microservices architecture

  1. orders service: stores the digital identity to be validated in a CALCULATE_IDENTITY_SCORE Redis stream
//adding Identity To TransactionStream
const userId = 'USR_4e7acc44-e91e-4c5c-9112-bdd99d799dd3';
const sessionId = 'SES_94ff24a8-65b5-4795-9227-99906a43884e';
let orderDetails = {
  orderId: '63f5f8dc3696d145a45775a6',
  orderAmount: '1000',
  userId: userId,
  sessionId: sessionId,
  orderStatus: 1,
  products: order.products, //array of product details
};

const entry: ITransactionStreamMessage = {
  action: 'CALCULATE_IDENTITY_SCORE',
  logMessage: `Digital identity to be validated/ scored for the user ${userId}`,
  userId: userId,
  sessionId: sessionId,
  orderDetails: orderDetails ? JSON.stringify(orderDetails) : '',
  transactionPipeline: JSON.stringify(TransactionPipelines.CHECKOUT),

  identityBrowserAgent: req.headers['user-agent'],
  identityIpAddress:
    req.headers['x-forwarded-for']?.toString() || req.socket.remoteAddress,
};

const nodeRedisClient = getNodeRedisClient();
const streamKeyName = 'TRANSACTION_STREAM';
const id = '*'; //* = auto generate
await nodeRedisClient.xAdd(streamKeyName, id, entry);
  2. digital identity service: reads the identity from the CALCULATE_IDENTITY_SCORE stream
listenToStreams({
  streams: [
    {
      streamKeyName: REDIS_STREAMS.STREAMS.TRANSACTIONS,
      eventHandlers: {
        // ...
        [TransactionStreamActions.CALCULATE_IDENTITY_SCORE]:
          scoreDigitalIdentity,
      },
    },
  ],
  groupName: REDIS_STREAMS.GROUPS.IDENTITY,
  consumerName: REDIS_STREAMS.CONSUMERS.IDENTITY,
});

const scoreDigitalIdentity: IMessageHandler = async (
  message: ITransactionStreamMessage,
  messageId,
) => {
  LoggerCls.info(`Scoring digital identity for ${message.userId}`);

  //step 1 - calculate score for validation digital identity
  const identityScore = await calculateIdentityScore(message);
  message.identityScore = identityScore.toString();

  LoggerCls.info(`Adding digital identity to redis for ${message.userId}`);
  //step 2 - add validation digital identity to redis
  const insertedKey = await addDigitalIdentityToRedis(message);

  // ...
};

const calculateIdentityScore = async (message: ITransactionStreamMessage) => {
  // Compare the "digital identity" with previously stored "login identities" and determine the identity score

  let identityScore = 0;
  const repository = digitalIdentityRepo.getRepository();

  if (message && message.userId && repository) {
    let queryBuilder = repository
      .search()
      .where('userId')
      .eq(message.userId)
      .and('action')
      .eq('INSERT_LOGIN_IDENTITY')
      .and('statusCode')
      .eq(DB_ROW_STATUS.ACTIVE);

    //console.log(queryBuilder.query);
    const digitalIdentities = await queryBuilder.return.all();

    if (digitalIdentities && digitalIdentities.length) {
      //if browser details match -> +1 score
      const matchBrowserItems = digitalIdentities.filter((_digIdent) => {
        let identityBrowserAgentHash = crypto
          .createHash('sha256')
          .update(message.identityBrowserAgent)
          .digest('hex');
        return _digIdent.browserFingerprint == identityBrowserAgentHash;
      });
      if (matchBrowserItems.length > 0) {
        identityScore += 1;
      }

      //if IP address matches -> +1 score
      const matchIpAddressItems = digitalIdentities.filter((_digIdent) => {
        return _digIdent.ipAddress == message.identityIpAddress;
      });
      if (matchIpAddressItems.length > 0) {
        identityScore += 1;
      }
    }
  }

  //calculate average score
  const noOfIdentityCharacteristics = 2; //2 == browserFingerprint, ipAddress
  identityScore = identityScore / noOfIdentityCharacteristics;
  return identityScore; // identityScore final value ranges between 0 (no match) and 1 (full match)
};
  3. digital identity service: stores the identity with its score as JSON in Redis
const addDigitalIdentityToRedis = async (
  message: ITransactionStreamMessage,
) => {
  let insertedKey = '';

  const userId = message.userId;
  const digitalIdentity: IDigitalIdentity = {
    action: message.action,
    userId: userId,
    sessionId: message.sessionId,

    ipAddress: message.identityIpAddress,
    browserFingerprint: crypto
      .createHash('sha256')
      .update(message.identityBrowserAgent)
      .digest('hex'),
    identityScore: message.identityScore ? message.identityScore : '',

    createdOn: new Date(),
    createdBy: userId,
    statusCode: DB_ROW_STATUS.ACTIVE, //1
  };

  const repository = digitalIdentityRepo.getRepository();
  if (repository) {
    const entity = repository.createEntity(digitalIdentity);
    insertedKey = await repository.save(entity);
  }

  return insertedKey;
};

Conclusion

Now you have learned how to use Redis to set up ongoing digital identity monitoring and scoring in a microservices application. This is also called "dynamic digital identity monitoring." Dynamic digital identities are constantly updated based on the information available from each digital transaction. By analyzing these transactions, businesses can build a comprehensive and up-to-date digital identity that includes both static and dynamic elements. These identities can then be scored to determine the risk they pose to the business.

In addition to increasing security, digital identities can also improve the customer experience. By using the digital footprint left by a user, businesses can offer more personalized services and reduce friction in the authentication process.

Digital identity systems are typically designed to be interoperable and scalable, allowing for seamless integration with various applications and platforms.

Additional Resources

- + \ No newline at end of file diff --git a/howtos/solutions/fraud-detection/transaction-risk-scoring/index.html b/howtos/solutions/fraud-detection/transaction-risk-scoring/index.html index 6ad0a225cd..f232f35c39 100644 --- a/howtos/solutions/fraud-detection/transaction-risk-scoring/index.html +++ b/howtos/solutions/fraud-detection/transaction-risk-scoring/index.html @@ -4,7 +4,7 @@ How to use Redis for Transaction risk scoring | The Home of Redis Developers - + @@ -14,7 +14,7 @@

How to use Redis for Transaction risk scoring


Profile picture for Prasan Kumar
Author:
Prasan Kumar, Technical Solutions Developer at Redis
Profile picture for Will Johnston
Author:
Will Johnston, Developer Growth Manager at Redis

GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

git clone --branch v3.0.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

What is transaction risk scoring?

"Transaction risk scoring" is a method of leveraging data science, machine learning, and statistical analysis to continuously monitor transactions and assess the relative risk associated with each transaction. By comparing transactional data to models of known fraud, the risk score can be calculated, and the closer a transaction matches fraudulent behaviour, the higher the risk score.

The score is typically based on a statistical analysis of historical transaction data to identify patterns and trends associated with fraudulent activity. The score can then be used to trigger alerts or to automatically decline transactions that exceed a certain risk threshold. It can also be used to trigger additional authentication steps for high-risk transactions. Additional steps might include a one-time password (OTP) sent via text, email, or biometric scan.

tip

Transaction risk scoring is often combined in a single system with other fraud detection methods, such as digital identity validation.

Why you should use Redis for transaction risk scoring

A risk-based approach must be designed to create a frictionless flow and avoid slowing down the transaction experience for legitimate customers while simultaneously preventing fraud. If your risk-based approach is too strict, it will block legitimate transactions and frustrate customers. If it is too lenient, it will allow fraudulent transactions to go through.

How to avoid false positives with rules engines

Rules-based automated fraud detection systems operate on simple "yes or no" logic to determine whether a given transaction is likely to be fraudulent. An example of a rule would be "block all transactions over $500 from a risky region". With a simple binary decision like this, the system is likely to block a lot of genuine customers. Sophisticated fraudsters easily fool such systems, and the complex nature of fraud means that simple "yes or no" rules may not be enough to assess the risk of each transaction accurately.
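To make that limitation concrete, here is a toy sketch of such a binary rule (the field names and region list are illustrative); it returns the same hard "yes or no" for a $501 purchase by a loyal customer and a $5,000 purchase from a stolen card:

const RISKY_REGIONS = new Set(['region-a', 'region-b']); // hypothetical list

const isBlocked = (txn: { amount: number; region: string }): boolean =>
  txn.amount > 500 && RISKY_REGIONS.has(txn.region); // binary: no notion of risk degree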

More accurate risk scoring with AI/ML addresses these issues. Modern fraud detection systems use machine learning models trained on large volumes of different data sets, known as "features" (user profiles, transaction patterns, behavioural attributes, and more), to accurately identify fraudulent transactions. These models are designed to be flexible, so they can adapt to new types of fraud. For example, a neural network can examine suspicious activities, such as how many pages a customer browses before making an order and whether they are copying and pasting information or typing it in manually, and flag the customer for further review.

The models use historical as well as the most recent data to create a risk profile for each customer. By analyzing past behaviour, it is possible to create a profile of what is normal for each customer. Any transactions that deviate from this profile can be flagged as suspicious, reducing the likelihood of false positives. The models are also very fast to adapt to changes in normal behaviour, and can quickly identify patterns of fraudulent transactions.

This is exactly where Redis Enterprise excels in transaction risk scoring.

How to use Redis Enterprise for transaction risk scoring

People use Redis Enterprise as the in-memory online feature store for online and real-time access to feature data as part of a transaction risk scoring system. By serving online features with low latency, Redis Enterprise enables the risk-scoring models to return results in real-time, thereby allowing the whole system to achieve high accuracy and instant response on approving legitimate online transactions.
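As a hedged illustration of an online feature lookup (the key and field names are hypothetical), precomputed features can be stored in a Redis hash and fetched in a single round trip at scoring time:

// Assumes a connected node-redis v4 client and features precomputed by an offline pipeline
const features = await client.hmGet(`features:user:${userId}`, [
  'avg_order_value_30d',
  'orders_count_24h',
  'distinct_merchants_7d',
]);
// Feed `features` to the risk-scoring model for a real-time decision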

Another very common use for Redis Enterprise in transaction risk scoring is for transaction filters. A transaction filter can be implemented as a Bloom filter that stores information about user behaviours. It can answer questions like "Have we seen this user purchase at this merchant before?" Or, "Have we seen this user purchase at this merchant in the X to Y price range before?" Being a probabilistic data structure, Redis Bloom filters do, indeed, sacrifice some accuracy, but in return, they get a very low memory footprint and response time.

tip

You might ask why not use a Redis Set to answer some of the questions above. Redis Sets are used to store unordered collections of unique strings (members). They are very efficient, with most operations taking O(1) time complexity. However, the SMEMBERS command is O(N), where N is the cardinality of the set; it can be very slow for large sets, and large sets also take a lot of memory. This presents a problem both for single-instance storage and for geo-replication, since more data requires more time to move. This is why Redis Bloom filters are a better choice for transaction filters: applications undergo millions of transactions every day, and Bloom filters maintain a speedy response time at scale.
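Here is a minimal sketch of such a transaction filter with node-redis v4, which exposes the RedisBloom commands under client.bf (the key name and sizing are assumptions):

import { createClient } from 'redis';

async function transactionFilterDemo() {
  const client = createClient();
  await client.connect();

  // Size the filter for ~1M user:merchant pairs with a 1% false-positive rate
  await client.bf.reserve('txn:user-merchant', 0.01, 1_000_000);

  // Record that this user has purchased at this merchant
  await client.bf.add('txn:user-merchant', 'USR_123:MER_456');

  // "Have we seen this user purchase at this merchant before?"
  const seenBefore = await client.bf.exists('txn:user-merchant', 'USR_123:MER_456');
  console.log(seenBefore); // true (with a small false-positive probability)

  await client.quit();
}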

Transaction risk scoring in a microservices architecture for an e-commerce application

The e-commerce microservices application discussed in the rest of this tutorial uses the following architecture:

  1. products service: handles querying products from the database and returning them to the frontend
  2. orders service: handles validating and creating orders
  3. order history service: handles querying a customer's order history
  4. payments service: handles processing orders for payment
  5. digital identity service: handles storing digital identity and calculating identity score
  6. api gateway: unifies services under a single endpoint
  7. mongodb/postgresql: serves as the primary database, storing orders, order history, products, etc.
  8. redis: serves as the stream processor and caching database
info

You don't need to use MongoDB/PostgreSQL as your primary database in the demo application; you can use other Prisma-supported databases as well. This is just an example.

Transaction risk scoring checkout procedure

When a user goes to checkout, the system needs to check the user's digital identity and profile to determine the risk of the transaction. The system can then decide whether to approve the transaction or to trigger additional authentication steps. The following diagram shows the flow of transaction risk scoring in the e-commerce application:

Transaction risk scoring event flow with redis streams

The following steps are performed in the checkout procedure:

  1. The customer adds an item to the cart and proceeds to checkout.
  2. The order service receives the checkout request and creates an order in the database.
  3. The order service publishes a CALCULATE_IDENTITY_SCORE event to the TRANSACTIONS Redis stream.
  4. The identity service subscribes to the TRANSACTIONS Redis stream and receives the CALCULATE_IDENTITY_SCORE event.
  5. The identity service calculates the identity score for the user and publishes a CALCULATE_PROFILE_SCORE event to the TRANSACTIONS Redis stream.
  6. The profile service subscribes to the TRANSACTIONS Redis stream and receives the CALCULATE_PROFILE_SCORE event.
  7. The profile service calculates the profile score by checking the products in the shopping cart against a known profile for the customer.
  8. The profile service publishes an ASSESS_RISK event to the TRANSACTIONS Redis stream.
  9. The order service subscribes to the TRANSACTIONS Redis stream and receives the ASSESS_RISK event.
  10. The order service determines if there is a likelihood of fraud based on the identity and profile scores. If there is a likelihood of fraud, the order service triggers additional authentication steps. If there is no likelihood of fraud, the order service approves the order and proceeds to process payments.
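A note on how each consumer knows what to publish next: the transactionPipeline travels with the message, so a service can look up its own action and emit the one that follows. A hedged sketch (the helper name is illustrative):

function getNextAction(message: ITransactionStreamMessage): string | null {
  const pipeline: string[] = JSON.parse(message.transactionPipeline);
  const currentIndex = pipeline.indexOf(message.action);
  // Return the next event in the pipeline, or null when the pipeline is finished
  return currentIndex >= 0 && currentIndex + 1 < pipeline.length
    ? pipeline[currentIndex + 1]
    : null;
}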

E-commerce application frontend using Next.js and Tailwind

The e-commerce microservices application consists of a frontend built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

  • Dashboard: Shows the list of products with search functionality

    redis microservices e-commerce app frontend products page

  • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button redis microservices e-commerce app frontend shopping cart

  • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

    redis microservices e-commerce app frontend order history page

GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

git clone --branch v3.0.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

Coding example for transaction risk scoring with Redis

Now that you understand the steps involved in the checkout process for transaction risk scoring, let's look at the code for the order service and profile service to facilitate this process:

note

To see the code for the identity service, check out the digital identity validation solution.

Initiating the checkout process in the order service

When the order service receives a checkout request, it creates an order in the database and publishes a CALCULATE_IDENTITY_SCORE event to the TRANSACTIONS Redis stream. The event contains information about the order as well as the customer, such as the browser fingerprint, IP address, and persona (profile). This data will be used during the transaction by the identity service and profile service to calculate the identity and profile scores. The order service also specifies the transaction pipeline, meaning it determines the order in which events are called, so that the identity service and profile service do not need to be aware of each other. The order service ultimately owns the transaction. The sample code below shows the createOrder function in the order service. It is highly simplified; for more detail, please see the source code linked above:

const TransactionPipelines = {
  CHECKOUT: [
    TransactionStreamActions.CALCULATE_IDENTITY_SCORE,
    TransactionStreamActions.CALCULATE_PROFILE_SCORE,
    TransactionStreamActions.ASSESS_RISK,
    TransactionStreamActions.PROCESS_PAYMENT,
    TransactionStreamActions.PAYMENT_PROCESSED,
  ],
};

async function createOrder(
  order: IOrder,
  browserAgent: string,
  ipAddress: string,
  sessionId: string,
  sessionData: ISessionData,
) {
  order = await validateOrder(order);

  // assumption for this simplified example: the validated order carries the user's id
  const userId = order.userId;

  const orderId = await addOrderToRedis(order);
  order.orderId = orderId;

  await addOrderToMongoDB(order);

  // Log order creation to the LOGS stream
  await streamLog({
    action: 'CREATE_ORDER',
    message: `[${REDIS_STREAMS.CONSUMERS.ORDERS}] Order created with id ${orderId} for the user ${userId}`,
    metadata: {
      userId: userId,
      persona: sessionData.persona,
      sessionId: sessionId,
    },
  });

  let orderAmount = 0;
  order.products?.forEach((product) => {
    orderAmount += product.productPrice * product.qty;
  });

  const orderDetails: IOrderDetails = {
    orderId: orderId,
    orderAmount: orderAmount.toFixed(2),
    userId: userId,
    sessionId: sessionId,
    orderStatus: order.orderStatusCode,
    products: order.products,
  };

  // Initiate the transaction by adding the order details to the transaction stream and sending the first event
  await addMessageToTransactionStream({
    action: TransactionPipelines.CHECKOUT[0],
    logMessage: `[${REDIS_STREAMS.CONSUMERS.IDENTITY}] Digital identity to be validated/ scored for the user ${userId}`,
    userId: userId,
    persona: sessionData.persona,
    sessionId: sessionId,
    orderDetails: orderDetails ? JSON.stringify(orderDetails) : '',
    transactionPipeline: JSON.stringify(TransactionPipelines.CHECKOUT),

    identityBrowserAgent: browserAgent,
    identityIpAddress: ipAddress,
  });

  return orderId;
}

Let's look at the addMessageToTransactionStream function in more detail:

async function addMessageToStream(message, streamKeyName) {
  try {
    const nodeRedisClient = getNodeRedisClient();
    if (nodeRedisClient && message && streamKeyName) {
      const id = '*'; // * = auto-generate the entry id
      await nodeRedisClient.xAdd(streamKeyName, id, message);
    }
  } catch (err) {
    LoggerCls.error('addMessageToStream error !', err);
    LoggerCls.error(streamKeyName, message);
  }
}

async function addMessageToTransactionStream(
  message: ITransactionStreamMessage,
) {
  if (message) {
    const streamKeyName = REDIS_STREAMS.STREAMS.TRANSACTIONS;
    await addMessageToStream(message, streamKeyName);
  }
}

Checking an order against a known profile in the profile service

As you can see above, the transaction pipeline follows CALCULATE_IDENTITY_SCORE -> CALCULATE_PROFILE_SCORE -> ASSESS_RISK. Let's now look at how the profile service subscribes to the TRANSACTIONS Redis stream and receives the CALCULATE_PROFILE_SCORE event. When the profile service starts, it subscribes to the TRANSACTIONS Redis stream and listens for events:

function listen() {
  listenToStreams({
    streams: [
      {
        streamKeyName: REDIS_STREAMS.STREAMS.TRANSACTIONS,
        eventHandlers: {
          [TransactionStreamActions.CALCULATE_PROFILE_SCORE]:
            calculateProfileScore,
        },
      },
    ],
    groupName: REDIS_STREAMS.GROUPS.PROFILE,
    consumerName: REDIS_STREAMS.CONSUMERS.PROFILE,
  });
}

A highly simplified version of the listenToStreams method looks as follows. It takes in a list of streams, each with an object that maps events on the stream to a callback for processing them, along with a consumer group name and a consumer name. It then handles subscribing to the streams and calls the appropriate handler when an event comes in:

interface ListenStreamOptions {
  streams: {
    streamKeyName: string;
    eventHandlers: {
      [messageAction: string]: IMessageHandler;
    };
  }[];
  groupName: string;
  consumerName: string;
  maxNoOfEntriesToReadAtTime?: number;
}

const listenToStreams = async (options: ListenStreamOptions) => {
  /*
    (A) create a consumer group for each stream
    (B) read a set of messages from the streams
    (C) process all messages received
    (D) trigger the appropriate action callback for each message
    (E) acknowledge individual messages after processing
  */

  const nodeRedisClient = getNodeRedisClient();
  if (nodeRedisClient) {
    const streams = options.streams;
    const groupName = options.groupName;
    const consumerName = options.consumerName;
    const readMaxCount = options.maxNoOfEntriesToReadAtTime || 100;
    const idInitialPosition = '0'; // 0 = start, $ = end, or any specific id
    const streamKeyIdArr: {
      key: string;
      id: string;
    }[] = [];

    // Wait for all consumer groups to be created before reading
    await Promise.all(
      streams.map(async (stream) => {
        LoggerCls.info(
          `Creating consumer group ${groupName} in stream ${stream.streamKeyName}`,
        );

        try {
          // (A) create consumer group for the stream
          await nodeRedisClient.xGroupCreate(
            stream.streamKeyName,
            groupName,
            idInitialPosition,
            {
              MKSTREAM: true,
            },
          );
        } catch (err) {
          LoggerCls.error(
            `Consumer group ${groupName} already exists in stream ${stream.streamKeyName}!`,
          ); //, err
        }

        streamKeyIdArr.push({
          key: stream.streamKeyName,
          id: '>', // Next entry ID that no consumer in this group has read
        });
      }),
    );

    LoggerCls.info(`Starting consumer ${consumerName}.`);

    while (true) {
      try {
        // (B) read a set of messages from the different streams
        const dataArr = await nodeRedisClient.xReadGroup(
          commandOptions({
            isolated: true,
          }),
          groupName,
          consumerName,
          // can specify multiple streams in an array of [{ key, id }]
          streamKeyIdArr,
          {
            COUNT: readMaxCount, // Read n entries at a time
            BLOCK: 5, // block for 5 ms if there are no new entries
          },
        );

        // dataArr = [
        //   {
        //     name: 'streamName',
        //     messages: [
        //       {
        //         id: '1642088708425-0',
        //         message: {
        //           key1: 'value1',
        //         },
        //       },
        //     ],
        //   },
        // ];

        // (C) process all messages received
        if (dataArr && dataArr.length) {
          for (let data of dataArr) {
            for (let messageItem of data.messages) {
              const streamKeyName = data.name;

              const stream = streams.find(
                (s) => s.streamKeyName == streamKeyName,
              );

              if (stream && messageItem.message) {
                const streamEventHandlers = stream.eventHandlers;
                const messageAction = messageItem.message.action;
                const messageHandler = streamEventHandlers[messageAction];

                if (messageHandler) {
                  // (D) trigger the appropriate action callback for each message
                  await messageHandler(messageItem.message, messageItem.id);
                }
                // (E) acknowledge individual messages after processing
                await nodeRedisClient.xAck(streamKeyName, groupName, messageItem.id);
              }
            }
          }
        } else {
          // LoggerCls.info('No new stream entries.');
        }
      } catch (err) {
        LoggerCls.error('xReadGroup error !', err);
      }
    }
  }
};

The calculateProfileScore handler (registered in the listen function above) is called when a new CALCULATE_PROFILE_SCORE event comes in. It validates the message and then calculates the profile score. It uses a Redis Bloom filter to check whether the user has ordered a similar set of products before. It uses a pre-defined persona for the purposes of this demo, but in reality you would build up a profile of the user over time. In the demo application, each product has a "master category" and "subcategory". Bloom filters are set up for the master categories as well as the master+subcategory combinations. The scoring logic is highlighted below:

async function calculateProfileScore(
  message: ITransactionStreamMessage,
  messageId,
) {
  LoggerCls.info(`Incoming message in Profile Service ${messageId}`);
  if (!(message.orderDetails && message.persona)) {
    return false;
  }

  await streamLog({
    action: TransactionStreamActions.CALCULATE_PROFILE_SCORE,
    message: `[${REDIS_STREAMS.CONSUMERS.PROFILE}] Calculating profile score for the user ${message.userId}`,
    metadata: message,
  });

  // check profile score
  const { products }: IOrderDetails = JSON.parse(message.orderDetails);
  const persona = message.persona.toLowerCase();
  let score = 0;
  const nodeRedisClient = getNodeRedisClient();

  if (!nodeRedisClient) {
    return false;
  }

  const categories = products.reduce((cat, product) => {
    const masterCategory = product.productData?.masterCategory?.typeName;
    const subCategory = product.productData?.subCategory?.typeName;

    if (masterCategory) {
      cat[`${masterCategory}`.toLowerCase()] = true;

      if (subCategory) {
        cat[`${masterCategory}:${subCategory}`.toLowerCase()] = true;
      }
    }

    return cat;
  }, {} as Record<string, boolean>);

  const categoryKeys = Object.keys(categories);
  const checks = categoryKeys.length;

  LoggerCls.info(
    `Checking ${checks} categories: ${JSON.stringify(categoryKeys)}`,
  );

  await Promise.all(
    categoryKeys.map(async (category) => {
      const exists = await nodeRedisClient.bf.exists(
        `bfprofile:${category}`.toLowerCase(),
        persona,
      );

      if (exists) {
        score += 1;
      }
    }),
  );

  LoggerCls.info(`After ${checks} checks, total score is ${score}`);
  score = score / (checks || 1);

  await streamLog({
    action: TransactionStreamActions.CALCULATE_PROFILE_SCORE,
    message: `[${REDIS_STREAMS.CONSUMERS.PROFILE}] Profile score for the user ${message.userId} is ${score}`,
    metadata: message,
  });

  await nextTransactionStep({
    ...message,
    logMessage: `[${REDIS_STREAMS.CONSUMERS.PROFILE}] Requesting next step in transaction risk scoring for the user ${message.userId}`,
    profileScore: `${score}`,
  });

  return true;
}
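For completeness, here is a minimal sketch of how the bfprofile:<category> Bloom filters checked above might be seeded. The seedProfileFilters helper and the persona-to-category data are hypothetical; the demo application pre-populates these filters from its own seed data:

import { createClient } from 'redis';

// Hypothetical seed data: the (lower-cased) categories each persona is known to buy from.
const personaCategories: Record<string, string[]> = {
  grandfather: ['accessories', 'accessories:watches'],
  young_lady: ['apparel', 'apparel:topwear'],
};

async function seedProfileFilters() {
  const client = createClient();
  await client.connect();

  for (const [persona, categories] of Object.entries(personaCategories)) {
    for (const category of categories) {
      // BF.ADD creates the filter with default parameters if it does not
      // exist yet; call bf.reserve first for an explicit error rate/capacity.
      await client.bf.add(`bfprofile:${category}`, persona);
    }
  }

  await client.disconnect();
}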

The nextTransactionStep method is called after the profile score has been calculated. It uses the transactionPipeline set up in the order service to publish the ASSESS_RISK event. The logic for this is below:

async function nextTransactionStep(message: ITransactionStreamMessage) {
  const transactionPipeline: TransactionStreamActions[] = JSON.parse(
    message.transactionPipeline,
  );
  transactionPipeline.shift();

  if (transactionPipeline.length <= 0) {
    return;
  }

  const streamKeyName = REDIS_STREAMS.STREAMS.TRANSACTIONS;
  await addMessageToStream(
    {
      ...message,
      action: transactionPipeline[0],
      transactionPipeline: JSON.stringify(transactionPipeline),
    },
    streamKeyName,
  );
}

In short, the nextTransactionStep method removes the event that was just handled from the front of the transactionPipeline and then publishes the next event in the pipeline, which in this case is the ASSESS_RISK event.
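A tiny illustrative snippet (not from the demo source) makes this shift-and-republish behavior concrete:

// Illustrative only: the pipeline as received by the profile service.
const pipeline = [
  'CALCULATE_PROFILE_SCORE',
  'ASSESS_RISK',
  'PROCESS_PAYMENT',
  'PAYMENT_PROCESSED',
];

pipeline.shift(); // drop the step that was just completed
console.log(pipeline[0]); // 'ASSESS_RISK' -- the action of the next event published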

Finalizing the order with transaction risk scoring in the order service

The order service is responsible for finalizing the order prior to payment. It listens to the ASSESS_RISK event, and then checks the calculated scores to determine if there is potential fraud.

note

The demo application keeps things very simple, and it only sets a "potentialFraud" flag on the order. In the real world, you need to choose not only what scoring makes sense for your application, but also how to handle potential fraud. For example, you may want to request additional information from the customer such as a one-time password. You may also want to send the order to a human for review. It depends on your business and your risk appetite and mitigation strategy.

The logic to process and finalize orders in the order service is below:

async function checkOrderRiskScore(message: ITransactionStreamMessage) {
  LoggerCls.info(`Incoming message in Order Service`);
  if (!message.orderDetails) {
    return false;
  }

  const orderDetails: IOrderDetails = JSON.parse(message.orderDetails);

  if (!(orderDetails.orderId && orderDetails.userId)) {
    return false;
  }

  LoggerCls.info(
    `Transaction risk scoring for user ${message.userId} and order ${orderDetails.orderId}`,
  );

  const { identityScore, profileScore } = message;
  const identityScoreNumber = Number(identityScore);
  const profileScoreNumber = Number(profileScore);
  let potentialFraud = false;

  if (identityScoreNumber <= 0 || profileScoreNumber < 0.5) {
    LoggerCls.info(
      `Transaction risk score is too low for user ${message.userId} and order ${orderDetails.orderId}`,
    );

    await streamLog({
      action: TransactionStreamActions.ASSESS_RISK,
      message: `[${REDIS_STREAMS.CONSUMERS.ORDERS}] Order failed fraud checks for orderId ${orderDetails.orderId} and user ${message.userId}`,
      metadata: message,
    });

    potentialFraud = true;
  }

  orderDetails.orderStatus = ORDER_STATUS.PENDING;
  orderDetails.potentialFraud = potentialFraud;

  updateOrderStatusInRedis(orderDetails);
  /**
   * In a real-world scenario you could use RDI, Redis Gears, or any other
   * database-to-database sync strategy for the Redis -> system-of-record data transfer.
   * To keep it simple, we add the data to MongoDB manually in the same service.
   */
  updateOrderStatusInMongoDB(orderDetails);

  message.orderDetails = JSON.stringify(orderDetails);

  await streamLog({
    action: TransactionStreamActions.ASSESS_RISK,
    message: `[${REDIS_STREAMS.CONSUMERS.ORDERS}] Order status updated after fraud checks for orderId ${orderDetails.orderId} and user ${message.userId}`,
    metadata: message,
  });

  await nextTransactionStep(message);

  return true;
}

Visualizing the transaction risk scoring data and event pipeline in RedisInsight

tip

RedisInsight is the free Redis GUI for viewing data in Redis. Click here to download it.

Now that you understand some of the code involved in processing transactions, let's take a look at the data in RedisInsight. First let's look at the TRANSACTION_STREAM key, which is where the stream data is held for the checkout transaction:

RedisInsight transaction risk scoring transaction stream
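If you'd rather inspect the same stream from code, a minimal node-redis sketch along the following lines also works; the TRANSACTION_STREAM key name is taken from the screenshot above:

import { createClient } from 'redis';

const client = createClient();
await client.connect();

// Read the first 10 entries of the transaction stream, oldest first.
const entries = await client.xRange('TRANSACTION_STREAM', '-', '+', {
  COUNT: 10,
});

for (const entry of entries) {
  // entry.message holds the fields written via xAdd (action, userId, ...)
  console.log(entry.id, entry.message.action);
}

await client.disconnect();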

You can see that the action column shows the transaction pipeline discussed earlier. Another thing to look at in RedisInsight is the Bloom filters:

RedisInsight transaction risk scoring bloom filters

These filters are pre-populated in the demo application based on a feature store. Redis also stores the features, which in this case are the profiles of each of the personas. Below is an example of one of the profile features:

RedisInsight transaction risk scoring feature store

Conclusion

In this post, you learned how to use Redis Streams to build a transaction risk scoring pipeline. You also learned how to use Redis Enterprise as a feature store and Redis Bloom filters to calculate a profile score. Every application is unique, so this tutorial is meant to be a starting point for you to build your own transaction risk scoring pipeline.

Additional resources

- + \ No newline at end of file diff --git a/howtos/solutions/index.html b/howtos/solutions/index.html index 0b85125746..c7e8428634 100644 --- a/howtos/solutions/index.html +++ b/howtos/solutions/index.html @@ -4,7 +4,7 @@ Solution Tutorials | The Home of Redis Developers - + @@ -12,7 +12,7 @@

Solution Tutorials

This page provides a listing of popular app solution tutorials from Redis.

Microservices

Learn how to easily build, test and deploy code for common microservice and caching design patterns across different industries using Redis.

How to build an e-commerce app using Redis with the CQRS Pattern
Microservices Communication with Redis streams
How to use Redis for Query Caching
How to use Redis for API Gateway Caching

Fraud detection

How to Handle Digital Identity Validation Using Redis
How to use Redis for Transaction risk scoring

Caching architecture

How to use Redis for Write-behind Caching
How to use Redis for Write through caching strategy
How to use Redis for Cache Prefetching Strategy
How to use Redis for Cache-aside

Real-time Inventory

Available to Promise in Real-time Inventory Using Redis
Real-time Local Inventory Search Using Redis

Mobile Banking

Mobile Banking Authentication and Session Storage Using Redis
Mobile Banking Account Dashboard Using Redis

Vectors

Getting Started with Vector Search Using Redis in NodeJS
- + \ No newline at end of file diff --git a/howtos/solutions/microservices/api-gateway-caching/index.html b/howtos/solutions/microservices/api-gateway-caching/index.html index e4df524035..74868fb727 100644 --- a/howtos/solutions/microservices/api-gateway-caching/index.html +++ b/howtos/solutions/microservices/api-gateway-caching/index.html @@ -4,7 +4,7 @@ How to use Redis for API Gateway Caching | The Home of Redis Developers - + @@ -14,7 +14,7 @@

How to use Redis for API Gateway Caching


Profile picture for Prasan Kumar
Author:
Prasan Kumar, Technical Solutions Developer at Redis
Profile picture for Will Johnston
Author:
Will Johnston, Developer Growth Manager at Redis

GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

git clone --branch v4.2.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

What is API gateway caching?

So you're building a microservices application, but you're struggling to handle authentication in a way that lets you reuse code and maximize performance. Typically for authentication you might use sessions, OAuth, authorization tokens, etc. For the purposes of this tutorial, let's assume we're using an authorization token. In a monolithic application, authentication is pretty straightforward:

When a request comes in:

  1. Decode the Authorization header.
  2. Validate the credentials.
  3. Store the session information on the request object or cache for further use down the line by the application.

However, you might be puzzled by how to do this with microservices. Ordinarily, in a microservices application an API gateway serves as the single entry point for clients and routes traffic to the appropriate services. Depending on the nature of the request, those services may or may not require a user to be authenticated. You might think it's a good idea to handle authentication in each respective service.

While this works, you end up with a fair amount of duplicated code. Plus, it's difficult to understand when and where slowdowns happen and to scale services appropriately, because you repeat some of the same work in each service. A more effective way to handle authentication is to deal with it at the API gateway layer, and then pass the session information down to each service.

Once you decide to handle authentication at the API gateway layer, you must decide where to store sessions.

Imagine you're building an e-commerce application that uses MongoDB or any relational database as the primary data store. You could store sessions in the primary database, but think about how many times the application needs to hit the primary database to retrieve session information. If you have millions of customers, you don't want to go to the database for every single request made to the API.

This is where Redis comes in.

Why you should use Redis for API gateway caching

Redis is an in-memory datastore, which – among other things – makes it a perfect tool for caching session data. Redis allows you to reduce the load on a primary database while speeding up database reads. The rest of this tutorial covers how to accomplish this in the context of an e-commerce application.
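Before diving into the demo's gateway code, here is a minimal sketch of that core idea: session reads hit Redis first and only fall back to the primary database on a miss. The fetchSessionFromPrimaryDb helper and the one-hour TTL are assumptions for illustration, not demo code:

import { createClient } from 'redis';

const redis = createClient();
await redis.connect();

// Hypothetical fallback: however your app loads a session from the primary database.
async function fetchSessionFromPrimaryDb(
  sessionId: string,
): Promise<string | null> {
  return null; // placeholder -- query MongoDB/PostgreSQL here
}

async function getSession(sessionId: string): Promise<string | null> {
  // 1. Try Redis first; with a warm cache, most requests stop here.
  const cached = await redis.get(`session:${sessionId}`);
  if (cached) {
    return cached;
  }

  // 2. Fall back to the primary database, then cache the result with a TTL.
  const session = await fetchSessionFromPrimaryDb(sessionId);
  if (session) {
    await redis.set(`session:${sessionId}`, session, { EX: 60 * 60 }); // 1 hour
  }
  return session;
}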

Microservices architecture for an e-commerce application

The e-commerce microservices application discussed in the rest of this tutorial uses the following architecture:

  1. products service: handles querying products from the database and returning them to the frontend
  2. orders service: handles validating and creating orders
  3. order history service: handles querying a customer's order history
  4. payments service: handles processing orders for payment
  5. digital identity service: handles storing digital identity and calculating identity score
  6. api gateway: unifies services under a single endpoint
  7. mongodb/ postgresql: serves as the primary database, storing orders, order history, products, etc.
  8. redis: serves as the stream processor and caching database
info

You don't need to use MongoDB or PostgreSQL as the primary database in the demo application; you can use any other Prisma-supported database as well. This is just an example.

The following diagram illustrates how the API gateway uses Redis as a cache for session information. The API gateway gets the session from Redis and then passes it on to each microservice. This provides an easy way to handle sessions in a single place and to propagate them throughout the rest of the microservices.

API gateway caching with Redis architecture diagram

tip

Use a Redis Enterprise cluster to get linear scaling, which ensures API calls perform well under peak loads. It also provides 99.999% uptime and Active-Active geo-distribution, which prevents loss of authentication and session data.

E-commerce application frontend using Next.js and Tailwind

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

  • Dashboard: Shows the list of products with search functionality

    redis microservices e-commerce app frontend products page

  • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button

    redis microservices e-commerce app frontend shopping cart

  • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

    redis microservices e-commerce app frontend order history page

GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

git clone --branch v4.2.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

API gateway caching in a microservices application with Redis

What's nice about a microservice architecture is that each service is set up so it can scale independently. Since each service might require authentication, you likely need session information for most requests. Therefore, it makes sense to use the API gateway to cache and retrieve session information and to subsequently pass the information on to each service. Let's see how you might accomplish this.

In our sample application, all requests are routed through the API gateway. We use Express to set up the API gateway, and the Authorization header to pass the authorization token from the frontend to the API. For every request, the API gateway gets the authorization token and looks it up in Redis. Then it passes it along to the correct microservice.

This code validates the session:

import express, { Express, Request } from 'express';
import cors from 'cors';
import { randomUUID } from 'crypto';
import {
  createProxyMiddleware,
  responseInterceptor,
} from 'http-proxy-middleware';

//-----
const app: Express = express();

app.use(cors());
app.use(async (req, res, next) => {
  const authorizationHeader = req.header('Authorization');
  const sessionInfo = await getSessionInfo(authorizationHeader); //---- (1)

  // add session info to the request
  // (req.session and req.sessionId come from the demo's Express type augmentation)
  if (sessionInfo?.sessionData && sessionInfo?.sessionId) {
    req.session = sessionInfo?.sessionData;
    req.sessionId = sessionInfo?.sessionId;
  }
  next();
});

app.use(
  '/orders',
  createProxyMiddleware({
    // http://localhost:3000/orders/bar -> http://localhost:3001/orders/bar
    target: 'http://localhost:3001',
    changeOrigin: true,
    selfHandleResponse: true,
    onProxyReq(proxyReq, req, res) {
      // pass session info to the microservice
      proxyReq.setHeader('x-session', req.session);
    },
    onProxyRes: applyAuthToResponse, //---- (2)
  }),
);

app.use(
  '/orderHistory',
  createProxyMiddleware({
    target: 'http://localhost:3002',
    changeOrigin: true,
    selfHandleResponse: true,
    onProxyReq(proxyReq, req, res) {
      // pass session info to the microservice
      proxyReq.setHeader('x-session', req.session);
    },
    onProxyRes: applyAuthToResponse, //---- (2)
  }),
);
//-----

const getSessionInfo = async (authHeader?: string) => {
  // (For demo purposes only) random userId and sessionId values are created the
  // first time; on subsequent requests the userId is fetched against that sessionId
  let sessionId = '';
  let sessionData: string | null = '';

  if (!!authHeader) {
    sessionId = authHeader.split(/\s/)[1];
  } else {
    sessionId = 'SES_' + randomUUID(); // generate a random new sessionId
  }

  const nodeRedisClient = getNodeRedisClient();
  if (nodeRedisClient) {
    const exists = await nodeRedisClient.exists(sessionId);
    if (!exists) {
      await nodeRedisClient.set(
        sessionId,
        JSON.stringify({ userId: 'USR_' + randomUUID() }),
      ); // generate a random new userId
    }
    sessionData = await nodeRedisClient.get(sessionId);
  }

  return {
    sessionId: sessionId,
    sessionData: sessionData,
  };
};

const applyAuthToResponse = responseInterceptor(
  // add the sessionId to the response so that the frontend can store it for future requests
  async (responseBuffer, proxyRes, req, res) => {
    // detect JSON responses
    if (
      !!proxyRes.headers['content-type'] &&
      proxyRes.headers['content-type'].includes('application/json')
    ) {
      let data = JSON.parse(responseBuffer.toString('utf8'));

      // manipulate JSON data here
      if (!!(req as Request).sessionId) {
        data = Object.assign({}, data, { auth: (req as Request).sessionId });
      }

      // return the manipulated JSON
      return JSON.stringify(data);
    }

    // return other content-types as-is
    return responseBuffer;
  },
);
info

This example is not meant to represent the best way to handle authentication. Instead, it illustrates what you might do with respect to Redis. You will likely have a different setup for authentication, but the concept of storing a session in Redis is similar.

In the code above, we check for the Authorization header; if it isn't present, we create a new session and store it in Redis. Then we retrieve the session from Redis. Further down the line, we attach the session to the x-session header prior to calling the orders service.

Now let's see how the orders service receives the session.

router.post(API_NAMES.CREATE_ORDER, async (req: Request, res: Response) => {
  const body = req.body;
  const result: IApiResponseBody = {
    data: null,
    error: null,
  };

  const sessionData = req.header('x-session');
  const userId = sessionData ? JSON.parse(sessionData).userId : '';
  //...
});

The code above shows how to pull the session out of the x-session header and get the userId.
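Since every downstream service repeats this header parsing, you might factor it into a small shared helper. This is a suggestion rather than code from the demo:

import type { Request } from 'express';

// Parse the session JSON that the gateway forwarded in the x-session header.
// Returns null when the header is absent or malformed.
function getSessionFromRequest(req: Request): { userId?: string } | null {
  const raw = req.header('x-session');
  if (!raw) {
    return null;
  }
  try {
    return JSON.parse(raw);
  } catch {
    return null; // treat a malformed header as unauthenticated
  }
}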

Ready to use Redis for API gateway caching?

That's all there is to it! You now know how to use Redis for API gateway caching. It's not complicated to get started, but this simple practice can help you scale as you build out microservices.

To learn more about Redis, check out the additional resources below:

Additional resources

- + \ No newline at end of file diff --git a/howtos/solutions/microservices/caching/index.html b/howtos/solutions/microservices/caching/index.html index dfd8c93126..a92c6c3d34 100644 --- a/howtos/solutions/microservices/caching/index.html +++ b/howtos/solutions/microservices/caching/index.html @@ -4,7 +4,7 @@ How to use Redis for Query Caching | The Home of Redis Developers - + @@ -15,7 +15,7 @@ Query caching is the technique you need to speed database queries by using different caching methods while keeping costs down! Imagine that you built an e-commerce application. It started small but is growing fast. By now, you have an extensive product catalog and millions of customers.

That's good for business, but a hardship for technology. Your queries to the primary database (MongoDB/PostgreSQL) are beginning to slow down, even though you've already attempted to optimize them. You can squeeze out a little extra performance, but it isn't enough to satisfy your customers.

Why you should use Redis for query caching

Redis is an in-memory datastore, best known for caching. Redis allows you to reduce the load on a primary database while speeding up database reads.

With any e-commerce application, there is one specific type of query that is most often requested. If you guessed that it’s the product search query, you’re correct!

To improve product search in an e-commerce application, you can implement one of following caching patterns:

  • Cache prefetching: An entire product catalog can be pre-cached in Redis, and the application can run any product query against Redis just as it would against the primary database.
  • Cache-aside pattern: Redis is filled on demand, based on whatever search parameters are requested by the frontend.
tip

If you use Redis Enterprise, cache-aside is easier due to its support for JSON and search. You also get additional features such as real-time performance, high scalability, resiliency, and fault tolerance. You can also call upon high-availability features such as Active-Active geo-redundancy.

This tutorial focuses on the cache-aside pattern. The goal of this design pattern is to set up optimal caching (load-as-you-go) for better read operations. With caching, you might be familiar with a "cache miss," where you do not find data in the cache, and a "cache hit," where you can find data in the cache. Let's look at how the cache-aside pattern works with Redis for both a "cache miss" and a "cache hit."

Cache-aside with Redis (cache miss)

Cache miss when using the cache-aside pattern with Redis

This diagram illustrates the steps taken in the cache-aside pattern when there is a "cache miss." To understand how this works, consider the following process:

  1. An application requests data from the backend.
  2. The backend checks to find out if the data is available in Redis.
  3. Data is not found (a cache miss), so the data is fetched from the database.
  4. The data returned from the database is subsequently stored in Redis.
  5. The data is then returned to the application.

Cache-aside with Redis (cache hit)

Now that you have seen what a "cache miss" looks like, let's cover a "cache hit." Here is the same diagram, but with the "cache hit" steps highlighted in green.

Cache hit when using the cache-aside pattern with Redis
  1. An application requests data from the backend.
  2. The backend checks to find out if the data is available in Redis.
  3. The data is found (a cache hit) and returned to the application directly from Redis.

The cache-aside pattern is useful when you need to:

  1. Query data frequently: When you have a large volume of reads (as is the case in an e-commerce application), the cache-aside pattern gives you an immediate performance gain for subsequent data requests.
  2. Fill the cache on demand: The cache-aside pattern fills the cache as data is requested rather than pre-caching, thus saving on space and cost. This is useful when it isn't clear what data will need to be cached.
  3. Be cost-conscious: Since cache size is directly related to the cost of cache storage in the cloud, the smaller the size, the less you pay.
tip

If you use Redis Enterprise and a database that uses a JDBC driver, you can take advantage of Redis Smart Cache, which lets you add caching to an application without changing the code. Click here to learn more!

Microservices architecture for an e-commerce application

The e-commerce microservices application discussed in the rest of this tutorial uses the following architecture:

  1. products service: handles querying products from the database and returning them to the frontend
  2. orders service: handles validating and creating orders
  3. order history service: handles querying a customer's order history
  4. payments service: handles processing orders for payment
  5. digital identity service: handles storing digital identity and calculating identity score
  6. api gateway: unifies services under a single endpoint
  7. mongodb/ postgresql: serves as the primary database, storing orders, order history, products, etc.
  8. redis: serves as the stream processor and caching database
info

You don't need to use MongoDB or PostgreSQL as the primary database in the demo application; you can use any other Prisma-supported database as well. This is just an example.

E-commerce application frontend using Next.js and Tailwind

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

  • Dashboard: Shows the list of products with search functionality

    redis microservices e-commerce app frontend products page

  • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button

    redis microservices e-commerce app frontend shopping cart

  • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

    redis microservices e-commerce app frontend order history page

GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

git clone --branch v4.2.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

Caching in a microservices application with Redis and a primary database (MongoDB/PostgreSQL)

In our sample application, the products service publishes an API for filtering products. Here's what a call to the API looks like:

Get products by filter request

docs/api/get-products-by-filter.md
// POST http://localhost:3000/products/getProductsByFilter
{
  "productDisplayName": "puma"
}

Get products by filter response (cache miss)

{
  "data": [
    {
      "productId": "11000",
      "price": 3995,
      "productDisplayName": "Puma Men Slick 3HD Yellow Black Watches",
      "variantName": "Slick 3HD Yellow",
      "brandName": "Puma",
      "ageGroup": "Adults-Men",
      "gender": "Men",
      "displayCategories": "Accessories",
      "masterCategory_typeName": "Accessories",
      "subCategory_typeName": "Watches",
      "styleImages_default_imageURL": "http://host.docker.internal:8080/images/11000.jpg",
      "productDescriptors_description_value": "<p style=\"text-align: justify;\">Stylish and comfortable, ...",
      "createdOn": "2023-07-13T14:07:38.020Z",
      "createdBy": "ADMIN",
      "lastUpdatedOn": "2023-07-13T14:07:38.020Z",
      "lastUpdatedBy": null,
      "statusCode": 1
    }
    //...
  ],
  "error": null,
  "isFromCache": false
}

Get products by filter response (cache hit)

{
  "data": [
    //...same data as above
  ],
  "error": null,
  "isFromCache": true // now the data comes from the cache rather than the DB
}

Implementing cache-aside with Redis and a primary database (MongoDB/PostgreSQL)

The following code shows the function used to search for products in the primary database:

server/src/services/products/src/service-impl.ts
async function getProductsByFilter(productFilter: Product) {
  const prisma = getPrismaClient();

  const whereQuery: Prisma.ProductWhereInput = {
    statusCode: DB_ROW_STATUS.ACTIVE,
  };

  if (productFilter && productFilter.productDisplayName) {
    whereQuery.productDisplayName = {
      contains: productFilter.productDisplayName,
      mode: 'insensitive',
    };
  }

  const products: Product[] = await prisma.product.findMany({
    where: whereQuery,
  });

  return products;
}

You simply make a call to the primary database (MongoDB/PostgreSQL) to find products based on a filter on the product's productDisplayName property. You can set up multiple columns for better fuzzy searching, but we simplified it for the purposes of this tutorial; see the sketch below for one way to broaden the filter.
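For instance, broadening the search to several text columns is a small change with Prisma's OR operator. The sketch below is illustrative: the search variable stands in for the requested search term, and variantName and brandName are fields that appear in the demo's product data:

// Broaden the filter so the search term can match any of several text columns.
const whereQuery: Prisma.ProductWhereInput = {
  statusCode: DB_ROW_STATUS.ACTIVE,
  OR: [
    { productDisplayName: { contains: search, mode: 'insensitive' } },
    { variantName: { contains: search, mode: 'insensitive' } },
    { brandName: { contains: search, mode: 'insensitive' } },
  ],
};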

Using the primary database directly without Redis works for a while, but eventually it slows down. That's why you might use Redis: to speed things up. The cache-aside pattern helps you balance performance with cost.

The basic decision tree for cache-aside is as follows.

When the frontend requests products:

  1. Form a hash with the contents of the request (i.e., the search parameters).
  2. Check Redis to see if a value exists for the hash.
  3. Is there a cache hit? If data is found for the hash, it is returned; the process stops here.
  4. Is there a cache miss? When data is not found, it is read out of the primary database and subsequently stored in Redis prior to being returned.

Here’s the code used to implement the decision tree:

server/src/services/products/src/routes.ts
const getHashKey = (_filter: Document) => {
  let retKey = '';
  if (_filter) {
    const text = JSON.stringify(_filter);
    retKey = crypto.createHash('sha256').update(text).digest('hex');
  }
  return 'CACHE_ASIDE_' + retKey;
};

router.post(API.GET_PRODUCTS_BY_FILTER, async (req: Request, res: Response) => {
  const body = req.body;
  // response shell (shape matches the service's IApiResponseBody)
  const result: IApiResponseBody = {
    data: null,
    error: null,
    isFromCache: false,
  };
  // using node-redis
  const redis = getNodeRedisClient();

  // get data from Redis
  const hashKey = getHashKey(req.body);
  const cachedData = await redis.get(hashKey);
  const docArr = cachedData ? JSON.parse(cachedData) : [];

  if (docArr && docArr.length) {
    result.data = docArr;
    result.isFromCache = true;
  } else {
    // get data from the primary database
    const dbData = await getProductsByFilter(body); // method shown earlier

    if (body && body.productDisplayName && dbData.length) {
      // set data in Redis (no need to wait)
      redis.set(hashKey, JSON.stringify(dbData), {
        EX: 60, // cache expiration in seconds
      });
    }

    result.data = dbData;
  }

  res.send(result);
});
tip

You need to decide what expiry or time to live (TTL) works best for your particular use case.
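One refinement worth considering (not part of the demo code) is adding a little random jitter to the TTL so that keys cached in the same burst don't all expire, and hit the database, at the same moment:

// Base TTL of 60 seconds plus up to 15 seconds of jitter to spread out expirations.
const BASE_TTL_SECONDS = 60;
const JITTER_SECONDS = 15;

function cacheTtl(): number {
  return BASE_TTL_SECONDS + Math.floor(Math.random() * JITTER_SECONDS);
}

// Usage with the route shown above:
// redis.set(hashKey, JSON.stringify(dbData), { EX: cacheTtl() });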

Ready to use Redis for query caching?

You now know how to use Redis for caching with one of the most common caching patterns (cache-aside). It's possible to incrementally adopt Redis wherever needed with different strategies/patterns. For more resources on the topic of microservices, check out the links below:

Additional resources

- + \ No newline at end of file diff --git a/howtos/solutions/microservices/common-data/microservices-arch-with-redis-old/index.html b/howtos/solutions/microservices/common-data/microservices-arch-with-redis-old/index.html index fcc7c80e4e..683e986b59 100644 --- a/howtos/solutions/microservices/common-data/microservices-arch-with-redis-old/index.html +++ b/howtos/solutions/microservices/common-data/microservices-arch-with-redis-old/index.html @@ -4,7 +4,7 @@ microservices-arch-with-redis-old | The Home of Redis Developers - + @@ -12,7 +12,7 @@

microservices-arch-with-redis-old

The e-commerce microservices application discussed in the rest of this tutorial uses the following architecture:

  1. products service: handles querying products from the database and returning them to the frontend
  2. orders service: handles validating and creating orders
  3. order history service: handles querying a customer's order history
  4. payments service: handles processing orders for payment
  5. digital identity service: handles storing digital identity and calculating identity score
  6. api gateway: unifies services under a single endpoint
  7. mongodb: serves as the primary database, storing orders, order history, products, etc.
  8. redis: serves as the stream processor and caching database
- + \ No newline at end of file diff --git a/howtos/solutions/microservices/common-data/microservices-arch-with-redis/index.html b/howtos/solutions/microservices/common-data/microservices-arch-with-redis/index.html index 4cabc14724..4379bdf0c3 100644 --- a/howtos/solutions/microservices/common-data/microservices-arch-with-redis/index.html +++ b/howtos/solutions/microservices/common-data/microservices-arch-with-redis/index.html @@ -4,7 +4,7 @@ microservices-arch-with-redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

microservices-arch-with-redis

The e-commerce microservices application discussed in the rest of this tutorial uses the following architecture:

  1. products service: handles querying products from the database and returning them to the frontend
  2. orders service: handles validating and creating orders
  3. order history service: handles querying a customer's order history
  4. payments service: handles processing orders for payment
  5. digital identity service: handles storing digital identity and calculating identity score
  6. api gateway: unifies services under a single endpoint
  7. mongodb/ postgresql: serves as the primary database, storing orders, order history, products, etc.
  8. redis: serves as the stream processor and caching database
info

You don't need to use MongoDB or PostgreSQL as the primary database in the demo application; you can use any other Prisma-supported database as well. This is just an example.

- + \ No newline at end of file diff --git a/howtos/solutions/microservices/common-data/microservices-arch/index.html b/howtos/solutions/microservices/common-data/microservices-arch/index.html index 9589d6de74..1fcc39b3b7 100644 --- a/howtos/solutions/microservices/common-data/microservices-arch/index.html +++ b/howtos/solutions/microservices/common-data/microservices-arch/index.html @@ -4,7 +4,7 @@ microservices-arch | The Home of Redis Developers - + @@ -12,7 +12,7 @@

microservices-arch

You eventually land on the following architecture:

  1. products service: handles querying products from the database and returning them to the frontend
  2. orders service: handles validating and creating orders
  3. order history service: handles querying a customer's order history
  4. payments service: handles processing orders for payment
  5. api gateway: unifies the services under a single endpoint
  6. mongodb/ postgresql: serves as the write-optimized database for storing orders, order history, products, etc.
info

You don't need to use MongoDB or PostgreSQL as the write-optimized database in the demo application; you can use any other Prisma-supported database as well. This is just an example.

- + \ No newline at end of file diff --git a/howtos/solutions/microservices/common-data/microservices-ecommerce-old/index.html b/howtos/solutions/microservices/common-data/microservices-ecommerce-old/index.html index d89b41a9a6..4bfdcacc7b 100644 --- a/howtos/solutions/microservices/common-data/microservices-ecommerce-old/index.html +++ b/howtos/solutions/microservices/common-data/microservices-ecommerce-old/index.html @@ -4,7 +4,7 @@ microservices-ecommerce-old | The Home of Redis Developers - + @@ -14,7 +14,7 @@

microservices-ecommerce-old

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB. Below you will find screenshots of the frontend of the e-commerce app:

  • Dashboard: Shows the list of products with search functionality

    redis microservices e-commerce app frontend products page

  • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button

    redis microservices e-commerce app frontend shopping cart

  • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

    redis microservices e-commerce app frontend order history page

- + \ No newline at end of file diff --git a/howtos/solutions/microservices/common-data/microservices-ecommerce/index.html b/howtos/solutions/microservices/common-data/microservices-ecommerce/index.html index 8c2e5a2f77..591f935260 100644 --- a/howtos/solutions/microservices/common-data/microservices-ecommerce/index.html +++ b/howtos/solutions/microservices/common-data/microservices-ecommerce/index.html @@ -4,7 +4,7 @@ microservices-ecommerce | The Home of Redis Developers - + @@ -14,7 +14,7 @@

microservices-ecommerce

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

  • Dashboard: Shows the list of products with search functionality

    redis microservices e-commerce app frontend products page

  • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button

    redis microservices e-commerce app frontend shopping cart

  • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

    redis microservices e-commerce app frontend order history page

- + \ No newline at end of file diff --git a/howtos/solutions/microservices/common-data/microservices-source-code-tip-old/index.html b/howtos/solutions/microservices/common-data/microservices-source-code-tip-old/index.html index 05d36c0208..b8f4a51c82 100644 --- a/howtos/solutions/microservices/common-data/microservices-source-code-tip-old/index.html +++ b/howtos/solutions/microservices/common-data/microservices-source-code-tip-old/index.html @@ -4,7 +4,7 @@ microservices-source-code-tip-old | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/howtos/solutions/microservices/common-data/microservices-source-code-tip/index.html b/howtos/solutions/microservices/common-data/microservices-source-code-tip/index.html index 8a0765ea3b..283eb61e2e 100644 --- a/howtos/solutions/microservices/common-data/microservices-source-code-tip/index.html +++ b/howtos/solutions/microservices/common-data/microservices-source-code-tip/index.html @@ -4,7 +4,7 @@ microservices-source-code-tip | The Home of Redis Developers - + @@ -12,7 +12,7 @@
- + \ No newline at end of file diff --git a/howtos/solutions/microservices/common-data/redis-enterprise/index.html b/howtos/solutions/microservices/common-data/redis-enterprise/index.html index cdae0741c8..0fa1404e4c 100644 --- a/howtos/solutions/microservices/common-data/redis-enterprise/index.html +++ b/howtos/solutions/microservices/common-data/redis-enterprise/index.html @@ -4,7 +4,7 @@ redis-enterprise | The Home of Redis Developers - + @@ -12,7 +12,7 @@

redis-enterprise

You can use Redis Enterprise as a multi-model primary database. Redis Enterprise is a fully managed, highly available, secure, and real-time data platform. It can store data in both RAM and Flash. It also supports Active-Active (multi-zone read and write replicas) on different cloud vendors, providing extremely high availability and scalability. Active-Active offers global scalability while maintaining local speed for database reads and writes.

Redis Enterprise has many built-in modular capabilities, making it a unified, real-time data platform. Redis Enterprise is far more than a document database.

  • JSON: Persists JSON documents
  • Search: Indexes and searches JSON documents
  • Probabilistic data structures: Provides bloom filters and other probabilistic data structures
  • Time Series: Supports time series data structures
  • Triggers and Functions: Syncs data to external databases via different patterns (write-behind/write-through) or executes custom logic.

Use RedisInsight to view your Redis data or to play with raw Redis commands in the workbench.

If you're interested in diving deeper, try Redis Enterprise today for free!

- + \ No newline at end of file diff --git a/howtos/solutions/microservices/cqrs/index.html b/howtos/solutions/microservices/cqrs/index.html index 61e53ca572..a04b6e8934 100644 --- a/howtos/solutions/microservices/cqrs/index.html +++ b/howtos/solutions/microservices/cqrs/index.html @@ -4,7 +4,7 @@ How to Build an E-Commerce App Using Redis with the CQRS Pattern | The Home of Redis Developers - + @@ -14,7 +14,7 @@

How to Build an E-Commerce App Using Redis with the CQRS Pattern


Profile picture for Prasan Kumar
Author:
Prasan Kumar, Technical Solutions Developer at Redis
Profile picture for Will Johnston
Author:
Will Johnston, Developer Growth Manager at Redis

GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

git clone --branch v4.2.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

What is command and query responsibility segregation (CQRS)?

Command Query Responsibility Segregation (CQRS) is a critical pattern within a microservice architecture. It decouples reads (queries) and writes (commands), which permits read and write workloads to work independently.

Commands (writes) prioritize durability and consistency, while queries (reads) prioritize performance. This enables a microservice to write data to a slower, disk-based system-of-record database, while prefetching and caching that data for real-time reads.

The idea is simple: you separate commands such as "Order this product" (a write operation) from queries such as "Show me my order history" (a read operation). CQRS applications are often messaging-based and rely on eventual consistency.
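In code, the separation can be as simple as exposing two narrow interfaces rather than one repository, so each side can target a different store and scale independently. This sketch is illustrative, not taken from the demo source:

// Minimal stand-in for the demo's order type.
type IOrder = { orderId?: string; userId?: string; products?: unknown[] };

// Commands: writes that go to the system of record.
interface OrderCommands {
  createOrder(order: IOrder): Promise<string>; // returns the new orderId
}

// Queries: reads that can be served from a fast cache such as Redis.
interface OrderQueries {
  viewOrderHistory(userId: string): Promise<IOrder[]>;
}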

The sample data architecture that follows demonstrates how to use Redis with CQRS:

CQRS architecture with Redis

The architecture illustrated in the diagram uses the Change Data Capture pattern (noted as "Integrated CDC") to track the changed state on the command database and to replicate it to the query database (Redis). This is a common pattern used with CQRS.

Implementing CDC requires:

  1. Taking the data snapshot from the system of record
  2. Performing an ETL operation to load the data into the target cache database
  3. Setting up a mechanism to continuously stream the changes in the system of record to the cache
tip

While you can implement your own CDC mechanism with Redis using RedisGears, Redis Enterprise comes with its own integrated CDC mechanism to solve this problem for you.

Why you might use CQRS

To improve application performance, scale your read and write operations separately.

Consider the following scenario: You have an e-commerce application that allows a customer to populate a shopping cart with products. The site has a "Buy Now" button to facilitate ordering those products. When first starting out, you might set up and populate a product database (perhaps a SQL database). Then you might write a backend API to handle the processes of creating an order, creating an invoice, processing payments, handling fulfillment, and updating the customer's order history… all in one go.

This method of synchronous order processing seems like a good idea at first, but you soon find out that your database slows down as you gain more customers and have a higher sales volume. In reality, most applications have significantly more reads than writes. You should scale those operations separately.

You decide that you need to process orders quickly so the customer doesn't have to wait. Then, when you have time, you can create an invoice, process payment, handle fulfillment, etc.

So you decide to separate each of these steps. Using a microservices approach with CQRS allows you to scale your reads and writes independently as well as aid in decoupling your microservices. With a CQRS model, a single service is responsible for handling an entire command from end to end. One service should not depend on another service in order to complete a command.

Microservices CQRS architecture for an e-commerce application

You eventually land on the following architecture:

  1. products service: handles querying products from the database and returning them to the frontend
  2. orders service: handles validating and creating orders
  3. order history service: handles querying a customer's order history
  4. payments service: handles processing orders for payment
  5. api gateway: unifies the services under a single endpoint
  6. mongodb/ postgresql: serves as the write-optimized database for storing orders, order history, products, etc.
info

You don't need to use MongoDB or PostgreSQL as the write-optimized database in the demo application; you can use any other Prisma-supported database as well. This is just an example.

Using CQRS in a microservices architecture

Note that in the current architecture all the services use the same underlying database. Even though you’re technically separating reads and writes, you can't scale the write-optimized database independently. This is where Redis comes in. If you put Redis in front of your write-optimized database, you can use it for reads while writing to the write-optimized database. The benefit of Redis is that it’s fast for reads and writes, which is why it’s the best choice for caching and CQRS.

info

For the purposes of this tutorial, we’re not highlighting how communication is coordinated between our services, such as how new orders are processed for payment. That process uses Redis Streams, and is outlined in our interservice communication guide.

tip

When your e-commerce application eventually needs to scale across the globe, Redis Enterprise provides Active-Active geo-distribution for reads and writes at local latencies as well as availability of 99.999% uptime.

Let's look at some sample code that helps facilitate the CQRS pattern with Redis and a primary database (MongoDB/PostgreSQL).

E-commerce application frontend using Next.js and Tailwind

The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB/PostgreSQL using Prisma. Below you will find screenshots of the frontend of the e-commerce app:

  • Dashboard: Shows the list of products with search functionality

    redis microservices e-commerce app frontend products page

  • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button

    redis microservices e-commerce app frontend shopping cart

  • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

    redis microservices e-commerce app frontend order history page

GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

git clone --branch v4.2.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

Building a CQRS microservices application with Redis and a primary database (MongoDB/PostgreSQL)

Let's look at the sample code for the order service and see the CreateOrder command (a write operation). Then we look at the order history service to see the ViewOrderHistory command (a read operation).

Create order command API

The code that follows shows an example API request and response to create an order.

Create order request

docs/api/create-order.md
// POST http://api-gateway/orders/createOrder
{
  "products": [
    {
      "productId": "11002",
      "qty": 1,
      "productPrice": 4950
    },
    {
      "productId": "11012",
      "qty": 2,
      "productPrice": 1195
    }
  ]
}

Create order response

{
  "data": "d4075f43-c262-4027-ad25-7b1bc8c490b6", // orderId
  "error": null
}

When you make a request, it goes through the API gateway to the orders service. Ultimately, it ends up calling a createOrder function which looks as follows:

server/src/services/orders/src/service-impl.ts
const createOrder = async (
  order: IOrder,
  //...
) => {
  if (!order) {
    throw 'Order data is mandatory!';
  }

  const userId = order.userId || USERS.DEFAULT; // Used as a shortcut; in a real app you would use customer session data to fetch user details
  const orderId = uuidv4();

  order.orderId = orderId;
  order.orderStatusCode = ORDER_STATUS.CREATED;
  order.userId = userId;
  order.createdBy = userId;
  order.statusCode = DB_ROW_STATUS.ACTIVE;
  order.potentialFraud = false;

  order = await validateOrder(order);

  const products = await getProductDetails(order);
  addProductDataToOrders(order, products);

  await addOrderToRedis(order);

  await addOrderToPrismaDB(order);

  //...

  return orderId;
};
info

For tutorial simplicity, we add data to both the primary database and Redis in the same service (a double write). As mentioned earlier, a common pattern is to have your services write to one database, and then separately use a CDC mechanism to update the other database. For example, you could write directly to Redis, then use RedisGears to handle synchronizing Redis and the primary database in the background. For the purposes of this tutorial, we don't outline exactly how you might handle synchronization, but instead focus on how the data is stored and accessed in Redis.

tip

If you're using Redis Enterprise, you can take advantage of the integrated CDC mechanism to avoid having to roll your own.

Note that in the previous code block we call the addOrderToRedis function to store orders in Redis. We use Redis OM for Node.js to store the order entities in Redis. This is what that function looks like:

server/src/services/orders/src/service-impl.ts
import { Schema, Repository } from 'redis-om';
import { getNodeRedisClient } from '../utils/redis/redis-wrapper';

// Redis OM schema for Order
const schema = new Schema('Order', {
  orderId: { type: 'string', indexed: true },

  orderStatusCode: { type: 'number', indexed: true },
  potentialFraud: { type: 'boolean', indexed: false },
  userId: { type: 'string', indexed: true },

  createdOn: { type: 'date', indexed: false },
  createdBy: { type: 'string', indexed: true },
  lastUpdatedOn: { type: 'date', indexed: false },
  lastUpdatedBy: { type: 'string', indexed: false },
  statusCode: { type: 'number', indexed: true },
});

// Redis OM repository for Order (to read, write, and remove orders)
const getOrderRepository = () => {
  const redisClient = getNodeRedisClient();
  const repository = new Repository(schema, redisClient);
  return repository;
};

// Creates the Redis index so orders become searchable
const createRedisIndex = async () => {
  const repository = getOrderRepository();
  await repository.createIndex();
};

const addOrderToRedis = async (order: OrderWithIncludes) => {
  if (order) {
    const repository = getOrderRepository();
    // insert Order into Redis
    await repository.save(order.orderId, order);
  }
};
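Note that createRedisIndex is defined above but must run before any order searches will work. A minimal sketch of calling it once at service startup (the bootstrap function name here is illustrative, not from the app):

// Hypothetical service bootstrap: build the Redis index once at startup so
// that order searches work before the first query arrives. redis-om skips
// re-creating the index when the schema definition hasn't changed.
const initOrdersService = async () => {
  await createRedisIndex();
  // ... register routes, start listeners, etc.
};

initOrdersService().catch((err) => {
  console.error('Failed to initialize orders service', err);
  process.exit(1);
});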

Sample Order view using RedisInsight

sample order
tip

Download RedisInsight to view your Redis data or to play with raw Redis commands in the workbench.

Order history API

The code that follows shows an example API request and response to get a customer's order history.

Order history request

docs/api/view-order-history.md
// GET http://api-gateway/orderHistory/viewOrderHistory

Order history response

{
  "data": [
    {
      "orderId": "d4075f43-c262-4027-ad25-7b1bc8c490b6",
      "userId": "USR_22fcf2ee-465f-4341-89c2-c9d16b1f711b",
      "orderStatusCode": 4,
      "products": [
        {
          "productId": "11002",
          "qty": 1,
          "productPrice": 4950,
          "productData": {
            "productId": "11002",
            "price": 4950,
            "productDisplayName": "Puma Men Race Black Watch",
            "variantName": "Race 85",
            "brandName": "Puma",
            "ageGroup": "Adults-Men",
            "gender": "Men",
            "displayCategories": "Accessories",
            "masterCategory_typeName": "Accessories",
            "subCategory_typeName": "Watches",
            "styleImages_default_imageURL": "http://host.docker.internal:8080/images/11002.jpg",
            "productDescriptors_description_value": "<p>This watch from puma comes in a heavy duty design. The assymentric dial and chunky..."
          }
        },
        {
          "productId": "11012",
          "qty": 2,
          "productPrice": 1195,
          "productData": {
            "productId": "11012",
            "price": 1195,
            "productDisplayName": "Wrangler Women Frill Check Multi Tops",
            "variantName": "FRILL CHECK",
            "brandName": "Wrangler",
            "ageGroup": "Adults-Women",
            "gender": "Women",
            "displayCategories": "Sale and Clearance,Casual Wear",
            "masterCategory_typeName": "Apparel",
            "subCategory_typeName": "Topwear",
            "styleImages_default_imageURL": "http://host.docker.internal:8080/images/11012.jpg",
            "productDescriptors_description_value": "<p><strong>Composition</strong><br /> Navy blue, red, yellow and white checked top made of 100% cotton, with a jabot collar, buttoned ..."
          }
        }
      ],
      "createdBy": "USR_22fcf2ee-465f-4341-89c2-c9d16b1f711b",
      "lastUpdatedOn": "2023-07-13T14:11:49.997Z",
      "lastUpdatedBy": "USR_22fcf2ee-465f-4341-89c2-c9d16b1f711b"
    }
  ],
  "error": null
}

When you make a request, it goes through the API gateway to the order history service. Ultimately, it ends up calling a viewOrderHistory function, which looks as follows:

server/src/services/order-history/src/service-impl.ts
const viewOrderHistory = async (userId: string) => {
  const repository = OrderRepo.getRepository();
  let orders: Partial<IOrder>[] = [];
  const queryBuilder = repository
    .search()
    .where('createdBy')
    .eq(userId)
    .and('orderStatusCode')
    .gte(ORDER_STATUS.CREATED) // returns CREATED and PAYMENT_SUCCESS
    .and('statusCode')
    .eq(DB_ROW_STATUS.ACTIVE);

  console.log(queryBuilder.query);
  orders = <Partial<IOrder>[]>await queryBuilder.return.all();
  return orders;
};
info

Note that the order history service only needs to query Redis for all orders, because storage and synchronization between Redis and the primary database are handled within the orders service.

You might be used to using Redis as a cache, storing and retrieving stringified JSON (or perhaps hashed) values. Look closely at the code above, though: we store orders as JSON documents and then use Redis OM to search for the orders that belong to a specific user. Redis operates like a search engine here, with the ability to speed up queries and scale independently from the primary database (which in this case is MongoDB/PostgreSQL).
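For a sense of what the query builder produces under the hood, here is a rough sketch of an equivalent raw search issued directly with node-redis. The index name Order:index and the exact field syntax are assumptions based on the schema above, not taken from the app:

// Roughly equivalent raw search (index name and field syntax are assumed).
// String fields such as createdBy are indexed as TAG fields by Redis OM;
// special characters in the tag value may need escaping.
const results = await getNodeRedisClient().ft.search(
  'Order:index',
  `@createdBy:{${userId}} @orderStatusCode:[${ORDER_STATUS.CREATED} +inf] @statusCode:[${DB_ROW_STATUS.ACTIVE} ${DB_ROW_STATUS.ACTIVE}]`,
);
console.log(results.total, results.documents);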

Ready to use Redis with the CQRS pattern?

Hopefully, this tutorial has helped you visualize how to use Redis with the CQRS pattern. It can help to reduce the load on your primary database while still allowing you to store and search JSON documents. For additional resources related to this topic, check out the links below:

Additional resources

Microservices Communication with Redis Streams

  • The orders service listens to the PAYMENTS_STREAM and updates the orderStatus and paymentId for orders in the database accordingly as the order payment is fulfilled (i.e., it acts as the CONSUMER of the PAYMENTS_STREAM).

  • {
    //order collection update
    "orderId": "01GTP3K2TZQQCQ0T2G43DSSMTD",
    "paymentId": "6403212956a976300afbaac1",
    "orderStatusCode": 3 //payment success
    //...
    }

    E-commerce application frontend using Next.js and Tailwind

    The e-commerce microservices application consists of a frontend, built using Next.js with TailwindCSS. The application backend uses Node.js. The data is stored in Redis and MongoDB. Below you will find screenshots of the frontend of the e-commerce app:

    • Dashboard: Shows the list of products with search functionality

      redis microservices e-commerce app frontend products page

    • Shopping Cart: Add products to the cart, then check out using the "Buy Now" button

      redis microservices e-commerce app frontend shopping cart

    • Order history: Once an order is placed, the Orders link in the top navigation bar shows the order status and history

      redis microservices e-commerce app frontend order history page

    GITHUB CODE

    Below is a command to clone the source code for the application used in this tutorial

    git clone --branch v1.0.0 https://github.com/redis-developer/redis-microservices-ecommerce-solutions

    Building an interservice communication application with Redis

    We use Redis to broker the events sent between the orders service and the payments service.

    Producer 1 (orders service)

    Let's look at some of the code in the orders service to understand how it works:

    1. Orders are created.
    2. After order creation, the orders service appends minimal data to the ORDERS_STREAM to signal new order creation.
    server/src/services/orders/src/service-impl.ts
const addOrderIdToStream = async (
  orderId: string,
  orderAmount: number,
  userId: string,
) => {
  const nodeRedisClient = getNodeRedisClient();
  if (orderId && nodeRedisClient) {
    const streamKeyName = 'ORDERS_STREAM';
    const entry = {
      orderId: orderId,
      orderAmount: orderAmount.toFixed(2),
      userId: userId,
    };
    const id = '*'; // * = auto generate entry ID
    // xAdd appends the entry to the specified stream
    await nodeRedisClient.xAdd(streamKeyName, id, entry);
  }
};
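To sanity-check what the producer wrote, you can read the stream back directly. A minimal sketch using node-redis (not part of the app's code):

// Read every entry currently in ORDERS_STREAM, oldest to newest.
// '-' and '+' are the minimum and maximum possible entry IDs.
const entries = await getNodeRedisClient().xRange('ORDERS_STREAM', '-', '+');
for (const entry of entries) {
  console.log(entry.id, entry.message);
  // e.g. '1694000000000-0' { orderId: '...', orderAmount: '...', userId: '...' }
}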

    Consumer 1 (payments service)

    1. The payments service listens to the ORDERS_STREAM
    server/src/services/payments/src/service-impl.ts
    // Below is some code for how you would use Redis to listen for the stream events:

async function listenToStream(
  onMessage: (message: any, messageId: string) => Promise<void>,
) {
  // using node-redis
  const nodeRedisClient = getNodeRedisClient();
  const streamKeyName = 'ORDERS_STREAM'; // stream name
  const groupName = 'ORDERS_CON_GROUP'; // listening consumer group name (custom)
  const consumerName = 'PAYMENTS_CON'; // listening consumer name (custom)
  const readMaxCount = 100;

  // Create the consumer group if the stream doesn't exist yet
  if (!(await nodeRedisClient.exists(streamKeyName))) {
    const idPosition = '0'; // 0 = start, $ = end, or any specific entry ID
    await nodeRedisClient.xGroupCreate(streamKeyName, groupName, idPosition, {
      MKSTREAM: true,
    });
  }

  // set up a loop to listen for stream events
  while (true) {
    // read a batch of messages (possibly from multiple streams)
    const dataArr = await nodeRedisClient.xReadGroup(
      commandOptions({
        isolated: true,
      }),
      groupName,
      consumerName,
      [
        {
          // you can specify multiple streams in this array
          key: streamKeyName,
          id: '>', // next entry ID that no consumer in this group has read
        },
      ],
      {
        COUNT: readMaxCount, // read up to n entries at a time
        BLOCK: 0, // block indefinitely if there are none
      },
    );

    for (let data of dataArr) {
      for (let messageItem of data.messages) {
        // process the message received (in our case, perform payment)
        await onMessage(messageItem.message, messageItem.id);

        // acknowledge individual messages after processing
        nodeRedisClient.xAck(streamKeyName, groupName, messageItem.id);
      }
    }
  }
}

// `listenToStream` listens for events and calls the `onMessage` callback to further handle the events.
listenToStream(processPaymentForNewOrders);

const processPaymentForNewOrders: IMessageHandler = async (
  message,
  messageId,
) => {
  /*
  message = {
    orderId: "",
    orderAmount: "",
    userId: "",
  }
  */
  // process payment for the new orderId and insert "payments" data into the database
};
    note

There are a few important things to note here:

1. Create the consumer group (and the stream itself, via MKSTREAM) only if it doesn't already exist; XGROUP CREATE raises an error for an existing group.
2. Use isolated: true in order to use the blocking version of XREADGROUP in isolated execution mode.
3. Acknowledge individual messages after you process them to remove them from the pending entries list and to avoid processing them more than once (a recovery sketch for unacknowledged messages follows this list).
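If a consumer crashes after reading a message but before acknowledging it, the message stays in the group's pending entries list. A minimal recovery sketch, assuming node-redis's xAutoClaim (this is not part of the tutorial app):

// Claim messages that have been pending for more than 60 seconds and
// re-process them under this consumer. '0-0' starts scanning from the beginning.
const claimed = await nodeRedisClient.xAutoClaim(
  'ORDERS_STREAM',
  'ORDERS_CON_GROUP',
  'PAYMENTS_CON',
  60 * 1000, // min idle time in milliseconds
  '0-0',
);

for (const messageItem of claimed.messages) {
  if (!messageItem) continue; // entries may have been trimmed/deleted
  await onMessage(messageItem.message, messageItem.id);
  await nodeRedisClient.xAck('ORDERS_STREAM', 'ORDERS_CON_GROUP', messageItem.id);
}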

    Producer 2 (payments service)

    1. The payments service appends minimal data to PAYMENTS_STREAM to signal that a payment has been fulfilled.
    server/src/services/payments/src/service-impl.ts
const addPaymentIdToStream = async (
  orderId: string,
  paymentId: string,
  orderStatus: number,
  userId: string,
) => {
  const nodeRedisClient = getNodeRedisClient();
  if (orderId && nodeRedisClient) {
    const streamKeyName = 'PAYMENTS_STREAM';
    const entry = {
      orderId: orderId,
      paymentId: paymentId,
      orderStatusCode: orderStatus.toString(),
      userId: userId,
    };
    const id = '*'; // * = auto generate entry ID
    // xAdd appends the entry to the specified stream
    await nodeRedisClient.xAdd(streamKeyName, id, entry);
  }
};

    Consumer 2 (orders service)

    1. The orders service listens to the PAYMENTS_STREAM and updates the order when payments are fulfilled.
    server/src/services/orders/src/service-impl.ts
    //Below is some code for how you would use Redis to listen for the stream events:

async function listenToStream(
  onMessage: (message: any, messageId: string) => Promise<void>,
) {
  // using node-redis
  const nodeRedisClient = getNodeRedisClient();
  const streamKeyName = 'PAYMENTS_STREAM'; // stream name
  const groupName = 'PAYMENTS_CON_GROUP'; // listening consumer group name (custom)
  const consumerName = 'ORDERS_CON'; // listening consumer name (custom)
  const readMaxCount = 100;

  // Create the consumer group if the stream doesn't exist yet
  if (!(await nodeRedisClient.exists(streamKeyName))) {
    const idPosition = '0'; // 0 = start, $ = end, or any specific entry ID
    await nodeRedisClient.xGroupCreate(streamKeyName, groupName, idPosition, {
      MKSTREAM: true,
    });
  }

  // set up a loop to listen for stream events
  while (true) {
    // read a batch of messages (possibly from multiple streams)
    const dataArr = await nodeRedisClient.xReadGroup(
      commandOptions({
        isolated: true,
      }),
      groupName,
      consumerName,
      [
        {
          // you can specify multiple streams in this array
          key: streamKeyName,
          id: '>', // next entry ID that no consumer in this group has read
        },
      ],
      {
        COUNT: readMaxCount, // read up to n entries at a time
        BLOCK: 0, // block indefinitely if there are none
      },
    );

    for (let data of dataArr) {
      for (let messageItem of data.messages) {
        // process the message received (in our case, updateOrderStatus)
        await onMessage(messageItem.message, messageItem.id);

        // acknowledge individual messages after processing
        nodeRedisClient.xAck(streamKeyName, groupName, messageItem.id);
      }
    }
  }
}

// `listenToStream` listens for events and calls the `onMessage` callback to further handle the events.
listenToStream(updateOrderStatus);

const updateOrderStatus: IMessageHandler = async (message, messageId) => {
  /*
  message = {
    orderId: "",
    paymentId: "",
    orderStatusCode: "",
    userId: "",
  }
  */
  // update orderStatus and paymentId in the database for the order whose payment was fulfilled
  // updateOrderStatusInRedis(orderId, paymentId, orderStatusCode, userId)
  // updateOrderStatusInMongoDB(orderId, paymentId, orderStatusCode, userId)
};
    tip

    It's a best practice to validate all incoming messages to make sure you can work with them.

    For the purposes of our application, we update the order status in both Redis and the primary database from the same service (for simplicity, we don't use any synchronization technique between the databases; instead we focus on how the data is stored and accessed in Redis). Another common pattern is to have your services write to one database, and then separately use a CDC mechanism to update the other database. For example, you could write directly to Redis, then use Triggers and Functions to handle synchronizing Redis and the primary database in the background.

    tip

    If you use Redis Enterprise, you will find that Redis Streams is available on the same multi-tenant data platform you already use for caching. Redis Enterprise also has high availability, message persistence, support for multiple clients, and resiliency with primary/secondary data replication… all built in.

    Ready to use Redis for streaming?

    That's all there is to it! You now know how to use Redis for streaming as both a producer and a consumer. Hopefully, you can draw some inspiration from this tutorial and apply it to your own event streaming application. For more on this topic, check out the additional resources below:

    Additional resources


    Mobile Banking Account Dashboard Using Redis

    GITHUB CODE

    Below is a command to clone the source code for the application used in this tutorial

    git clone --branch v1.2.0 https://github.com/redis-developer/mobile-banking-solutions

    What is a mobile banking account dashboard?

    An account dashboard is a page in a mobile banking app that instantly renders account highlights to users. A customer can click on any of the accounts on the dashboard to see real-time account details, such as the latest transactions, the remaining mortgage balance, checking and savings balances, and so on.

    An account dashboard makes a customer's finances easily visible in one place. It reduces financial complexity for the customer and fosters customer loyalty.

    The following diagram is an example data architecture for an account dashboard:

    dashboard

    1. Banks store information in a number of separate databases that support individual banking products
    2. Key customer account details (balances, recent transactions) across the bank's product portfolio are prefetched into Redis Enterprise using Redis Data Integration (RDI)
    3. Redis Enterprise powers customers' account dashboards, enabling mobile banking users to view balances and other high-priority information immediately upon login

    Why you should use Redis for account dashboards in mobile banking

    • Resilience: Redis Enterprise provides resilience with 99.999% uptime and Active-Active Geo Distribution to prevent loss of critical user profile data

    • Scalability: Redis Enterprise provides < 1ms performance at incredibly high scale to ensure apps perform under peak loads

    • JSON Support: Provides the ability to create and store account information as JSON documents with the < 1ms speed of Redis

    • Querying and Indexing: Redis Enterprise can quickly identify and store data from multiple different databases and index data to make it readily searchable

    note

    Redis Stack supports the JSON data type and allows you to index and query JSON and more, so your Redis data is not limited to simple stringified key-value data.

    Building an account dashboard with Redis

    GITHUB CODE

    Below is a command to clone the source code for the application used in this tutorial

    git clone --branch v1.2.0 https://github.com/redis-developer/mobile-banking-solutions

    Download the above source code and run the following command to start the demo application

    docker compose up -d

    Once Docker is up and running, open http://localhost:8080/ in your browser to view the application

    Data seeding

    This application leverages Redis core data structures, JSON, TimeSeries, and Search and Query features. The seeded data is later used to show a searchable transaction overview with real-time updates, as well as a personal finance management overview with real-time balance and biggest-spender updates.

    On application startup in app/server.js, a cron job is scheduled to create random bank transactions at regular intervals and seed those transactions into Redis.

    app/server.js
//cron job to trigger createBankTransaction() at regular intervals

cron.schedule('*/10 * * * * *', async () => {
  // runs every 10 seconds
  const userName = process.env.REDIS_USERNAME;

  createBankTransaction(userName);

  //...
});
    • The transaction generator creates a randomized banking debit or credit which will reflect on a (default) starting user balance of $100,000.00
    • The transaction data is saved as a JSON document within Redis.
    • To capture balance over time, the balanceAfter value is recorded in a TimeSeries with the key balance_ts for every transaction.
    • To track biggest spenders, an associated fromAccountName member within the sorted set bigspenders is incremented by the transaction amount. Note that this amount can be positive or negative.
    app/transactions/transactionsGenerator.js
let balance = 100000.0;
const BALANCE_TS = 'balance_ts';
const SORTED_SET_KEY = 'bigspenders';

export const createBankTransaction = async (userName) => {
  // create a random bank transaction (userName is used by the elided persistence code)
  let vendorsList = source.source; // app/transactions/transaction_sources.js
  const random = Math.floor(Math.random() * 9999999999);

  const vendor = vendorsList[random % vendorsList.length]; // random vendor from the list

  const amount = createTransactionAmount(vendor.fromAccountName, random);
  const transaction = {
    id: random * random,
    fromAccount: Math.floor((random / 2) * 3).toString(),
    fromAccountName: vendor.fromAccountName,
    toAccount: '1580783161',
    toAccountName: 'bob',
    amount: amount,
    description: vendor.description,
    transactionDate: new Date(),
    transactionType: vendor.type,
    balanceAfter: balance,
  };

  // redis json feature
  const bankTransaction = await bankTransactionRepository.save(transaction);
  console.log('Created bankTransaction!');
  // ...
};

const createTransactionAmount = (vendor, random) => {
  let amount = createAmount(); // random amount
  balance += amount;
  balance = parseFloat(balance.toFixed(2));

  // redis time series feature
  redis.ts.add(BALANCE_TS, '*', balance, { DUPLICATE_POLICY: 'first' });
  // redis sorted set as a secondary index
  redis.zIncrBy(SORTED_SET_KEY, amount * -1, vendor);

  return amount;
};

    Sample bankTransaction data view using RedisInsight

    bank transaction data

    bank transaction json

    tip

    Download RedisInsight to view your Redis data or to play with raw Redis commands in the workbench.

    Balance over time

    Dashboard widget

    Chart

    API endpoint

    Endpoint: /transaction/balance
    Code Location: /routers/transaction-router.js
    Parameters: none
    Return value: [{x: timestamp, y: value}, ...]

    The balance endpoint leverages Time Series. It returns the range of all values from the time series object balance_ts. The resulting range is converted to an array of objects, each containing an x property with the timestamp and a y property with the associated value. This endpoint supplies the time series chart with coordinates to plot a visualization of the balance over time.

    app/routers/transaction-router.js
const BALANCE_TS = 'balance_ts';

/* fetch the balance for the past five minutes */
transactionRouter.get('/balance', async (req, res) => {
  // time series range
  const balance = await redis.ts.range(
    BALANCE_TS,
    Date.now() - 1000 * 60 * 5, // from
    Date.now(), // to
  );

  let balancePayload = balance.map((entry) => {
    return {
      x: entry.timestamp,
      y: entry.value,
    };
  });

  res.send(balancePayload);
});

    Biggest spenders

    Dashboard widget

    Chart

    API endpoint

    Endpoint: /transaction/biggestspenders
    Code Location: /routers/transaction-router.js
    Parameters: none
    Return value: {labels: [...], series: [...]}

    The biggest spenders endpoint leverages sorted sets as a secondary index. It retrieves the members of the sorted set bigspenders that have scores greater than zero, and returns five or fewer of them to feed the UI pie chart. The labels array contains the names of the biggest spenders, and the series array contains the numeric value associated with each member.

    app/routers/transaction-router.js
const SORTED_SET_KEY = 'bigspenders';

/* fetch top 5 biggest spenders */
transactionRouter.get('/biggestspenders', async (req, res) => {
  const range = await redis.zRangeByScoreWithScores(
    SORTED_SET_KEY,
    0,
    Infinity,
  );
  let series = [];
  let labels = [];

  range.slice(0, 5).forEach((spender) => {
    series.push(parseFloat(spender.score.toFixed(2)));
    labels.push(spender.value);
  });

  res.send({ series, labels });
});
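One caveat: zRangeByScoreWithScores returns members in ascending score order, so slicing the first five yields the lowest positive scores. If you want the highest scorers first, a sketch of an alternative (assuming node-redis's REV option on zRangeWithScores) looks like this:

// Fetch the five highest-scoring members of the sorted set, i.e. the
// members whose scores have been incremented the most.
const top = await redis.zRangeWithScores(SORTED_SET_KEY, 0, 4, { REV: true });
const labels = top.map((spender) => spender.value);
const series = top.map((spender) => parseFloat(spender.score.toFixed(2)));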

    Search existing transactions

    Dashboard widget

    Search transactions

    API endpoint

    Endpoint: /transaction/search
    Code Location: /routers/transaction-router.js
    Query Parameters: term
    Return value: array of results matching term

    The search endpoint leverages Search and Query. It receives a term query parameter from the UI, then runs a Redis OM Node query against the description, fromAccountName, and transactionType fields and returns the results.

    app/routers/transaction-router.js
transactionRouter.get('/search', async (req, res) => {
  const term = req.query.term;

  let results;

  if (term.length >= 3) {
    results = await bankRepo
      .search()
      .where('description')
      .matches(term)
      .or('fromAccountName')
      .matches(term)
      .or('transactionType')
      .equals(term)
      .return.all({ pageSize: 1000 });
  }
  res.send(results);
});

    Get recent transactions

    Dashboard widget

    View recent transactions

    API endpoint

    Endpoint: /transaction/transactions
    Code Location: /routers/transaction-router.js
    Parameters: none
    Return value: array of results

    The transactions endpoint also leverages Search and Query. A Redis OM Node query returns the ten most recent transactions.

    app/routers/transaction-router.js
/* return ten most recent transactions */
transactionRouter.get('/transactions', async (req, res) => {
  const transactions = await bankRepo
    .search()
    .sortBy('transactionDate', 'DESC')
    .return.all({ pageSize: 10 });

  res.send(transactions.slice(0, 10));
});

    Ready to use Redis for an account dashboard?

    Hopefully, this tutorial has helped you visualize how to use Redis for an account dashboard, specifically in the context of mobile banking. For additional resources related to this topic, check out the links below:

    Additional resources

Mobile Banking Authentication and Session Storage Using Redis

    Now, on every subsequent API request from the client, the connect-redis-stack library loads the session details from Redis into the req.session variable, based on the client cookie (sessionId).
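For context, here is a minimal sketch of how such a store can be wired up with express-session; the RedisStackStore options shown (prefix, ttlInSeconds) are assumptions for illustration, not copied from the app:

import express from 'express';
import session from 'express-session';
import { createClient } from 'redis';
import { RedisStackStore } from 'connect-redis-stack';

const redis = createClient();
await redis.connect();

const app = express();
app.use(
  session({
    store: new RedisStackStore({
      client: redis,
      prefix: 'redisBank:', // session keys stored as redisBank:<sessionId>
      ttlInSeconds: 3600, // assumed option: how long sessions live in Redis
    }),
    resave: false,
    saveUninitialized: true,
    secret: process.env.SESSION_SECRET ?? 'dev-secret', // illustrative
  }),
);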

    Balance API (Session storage)

    Consider the /transaction/balance API code below, which demonstrates session storage.

    To update session data, we modify the req.session variable. Let's add more session data, such as the user's current balance.

    app/routers/transaction-router.js
/* fetch the balance for the past five minutes: /transaction/balance */
transactionRouter.get('/balance', async (req, res) => {
  const balance = await redis.ts.range(
    BALANCE_TS,
    Date.now() - 1000 * 60 * 5,
    Date.now(),
  );

  let balancePayload = balance.map((entry) => {
    return {
      x: entry.timestamp,
      y: entry.value,
    };
  });

  let session = req.session;
  if (session.userid && balancePayload.length) {
    // add the latest balance amount to the session
    session.currentBalanceAmount = balancePayload[balancePayload.length - 1]; // updating session data
  }

  res.send(balancePayload);
});
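To see the stored session data coming back out, here is a sketch of a hypothetical endpoint (not part of the app) that reads the value written above from req.session:

// Hypothetical endpoint for illustration: connect-redis-stack repopulates
// req.session from Redis using the sessionId cookie, so the balance written
// by /balance is available here on a later request.
transactionRouter.get('/balance/latest', (req, res) => {
  const latest = req.session.currentBalanceAmount;
  if (!latest) {
    return res.status(404).send({ error: 'No balance recorded in this session yet' });
  }
  res.send({ timestamp: latest.x, balance: latest.y });
});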
    • Updated session entry in Redis with the currentBalanceAmount field ('x' denoting the timestamp and 'y' denoting the balance amount at that timestamp)

      session update

    • Verify the latest balance amount in the dashboard UI

      dashboard balance

    Ready to use Redis for session management?

    Hopefully, this tutorial has helped you visualize how to use Redis for better session management, specifically in the context of mobile banking. For additional resources related to this topic, check out the links below:

    Additional resources


    Available to Promise in Real-time Inventory Using Redis


    Profile picture for Prasan Kumar
    Author:
    Prasan Kumar, Technical Solutions Developer at Redis
    Profile picture for Will Johnston
    Author:
    Will Johnston, Developer Growth Manager at Redis

    GITHUB CODE

    Below is a command to clone the source code for the application used in this tutorial

    git clone https://github.com/redis-developer/redis-real-time-inventory-solutions

    What is available-to-promise (ATP)?

    A major requirement in a retail inventory system is presenting an accurate, real-time view of inventory to shoppers and store associates, enabling buy-online-pickup-in-store (BOPIS) and optimizing fulfillment across multiple inventory locations.

    Available to promise (ATP) is the projected amount of inventory left available to sell, not including allocated inventory. It allows businesses to control distribution to their customers and predict inventory. The ATP model helps retailers keep inventory costs down, such as ordering costs, carrying costs, and stock-out costs. ATP is helpful as long as consumer buying forecasts remain correct. For retailers, implementing ATP processes effectively can mean the difference between sustained growth and an inventory that repeatedly runs out of customers' favorite products, missing sales opportunities and harming the customer experience.

    How to calculate available-to-promise

    Calculating available-to-promise is a relatively simple undertaking. Apply the following formula for an accurate breakdown of available-to-promise capabilities:

    Available-to-promise = QuantityOnHand + Supply - Demand

    This formula includes the following elements:

    • QuantityOnHand: the total number of products that are immediately available to a company
    • Supply: the total stock of a product available for sale
    • Demand: the amount of a specific product that consumers are willing to purchase
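As a quick worked example (numbers invented for illustration): with 100 units on hand, 50 more inbound from suppliers, and committed demand of 30, ATP = 100 + 50 - 30 = 120 units that can still be promised. A minimal sketch in TypeScript:

// Compute available-to-promise from the three inputs defined above.
function availableToPromise(quantityOnHand: number, supply: number, demand: number): number {
  return quantityOnHand + supply - demand;
}

console.log(availableToPromise(100, 50, 30)); // 120 units can still be promised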

    Current challenges in real time inventory

    • Over and under-stocking: While adopting a multi-channel business model (online & in store), lack of inventory visibility results in over and under-stocking of inventory in different regions and stores.

    • Consumers seek convenience: The ability to search across regional store locations and pickup merchandise immediately rather than wait for shipping is a key differentiator for retailers.

    • Consumers seek speed: All retailers, even small or family-run, must compete against the customer experience of large online retailers like Alibaba, FlipKart, Shopee, and Amazon.

    • High inventory costs: Retailers seek to lower inventory costs by eliminating missed sales from out-of-stock scenarios which also leads to higher “inventory turnover ratios.”

    • Brand value: Inaccurate store inventory counts lead to frustrated customers and lower sales; over time, this operational pain erodes the brand.

    Why you should use Redis for available-to-promise

    • Increased inventory visibility: Redis Enterprise provides highly scalable, real-time inventory synchronization between stores providing views into what stock is Available-To-Promise. Customers want to buy from a retailer who can check stock across multiple locations and provide real-time views on what's available locally.

    • Enhanced customer experience: Sub-millisecond latency means online customers can easily get real-time views of shopping carts, pricing, and in-stock availability. Redis Enterprise's built-in search engine delivers full-text and aggregated faceted search of inventory in real time, scaling performance to instantly search inventories with millions of product types, helping customers fill their shopping carts faster and keeping them engaged and loyal.

    • Cost efficiency at scale: Redis Enterprise offers real-time, bi-directional consistency between stores and data integration capabilities with enterprise systems without the complexity and costs of managing message brokers, auditing, and reconciliation.

    Real time inventory with Redis

    atp

    Using Redis, the system delivers real-time synchronization of inventory across stores, in-transit stock, and warehouses. It provides retailers with the most accurate, timely data on inventory across their entire store network, and gives consumers a positive customer experience when searching for and locating inventory.

    Redis Data Integration (RDI) capabilities enable accurate real-time inventory management and system of record synchronization. Redis advanced inventory search and query capabilities provide accurate available inventory information to multichannel and omnichannel customers and store associates.

    This solution increases inventory turnover ratios resulting in lower inventory costs, higher revenue and profits. It also reduces the impact of customer searches on Systems of Record and Inventory Management Systems (IMS).

    Customer proof points

    Building a real-time inventory service with Redis

    GITHUB CODE

    Below is a command to clone the source code for the application used in this tutorial

    git clone https://github.com/redis-developer/redis-real-time-inventory-solutions

    Managing inventory for a SKU (stock keeping unit) involves activities such as:

    1. RetrieveSKU: Fetch the current quantity of a product
    2. UpdateSKU: Update the latest quantity of a product
    3. IncrementSKU: Increment the quantity by a specific value (say, when more products are procured)
    4. DecrementSKU: Decrement the quantity by a specific value (say, after order fulfillment of the product)
    5. RetrieveManySKUs: Fetch the current quantity of multiple products (say, to verify products are in stock before payment)
    6. DecrementManySKUs: Decrement the quantity of multiple products (say, after fulfillment of an order with multiple products)

    RetrieveSKU

    The code that follows shows an example API request and response for retrieveSKU activity.

    retrieveSKU API Request

    GET http://localhost:3000/api/retrieveSKU?sku=1019688

    retrieveSKU API Response

{
  "data": {
    "sku": 1019688,
    "name": "5-Year Protection Plan - Geek Squad",
    "type": "BlackTie",
    "totalQuantity": 10
  },
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling a retrieveSKU function which looks as follows:


    src/inventory-service.ts

static async retrieveSKU(_productId: number): Promise<IProduct> {
  /**
  Get current Quantity of a Product.

  :param _productId: Product Id
  :return: Product with Quantity
  */
  const repository = ProductRepo.getRepository();
  let retItem: IProduct = {};

  if (repository && _productId) {
    // fetch product by ID (using the Redis OM library)
    const product = <IProduct>await repository.fetch(_productId.toString());

    if (product) {
      retItem = {
        sku: product.sku,
        name: product.name,
        type: product.type,
        totalQuantity: product.totalQuantity,
      };
    } else {
      throw `Product with Id ${_productId} not found`;
    }
  } else {
    throw `Input params failed !`;
  }

  return retItem;
}

    UpdateSKU

    The code that follows shows an example API request and response for updateSKU activity.

    updateSKU API Request

    POST http://localhost:3000/api/updateSKU
{
  "sku": 1019688,
  "quantity": 25
}

    updateSKU API Response

{
  "data": {
    "sku": 1019688,
    "name": "5-Year Protection Plan - Geek Squad",
    "type": "BlackTie",
    "totalQuantity": 25 // updated value
  },
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling an updateSKU function which looks as follows:

    src/inventory-service.ts
static async updateSKU(_productId: number, _quantity: number): Promise<IProduct> {
  /**
  Set Quantity of a Product.

  :param _productId: Product Id
  :param _quantity: new quantity
  :return: Product with Quantity
  */
  const repository = ProductRepo.getRepository();
  let retItem: IProduct = {};

  if (repository && _productId && _quantity >= 0) {
    // fetch product by ID (using the Redis OM library)
    const product = <IProduct>await repository.fetch(_productId.toString());

    if (product) {
      // update the product fields
      product.totalQuantity = _quantity;

      // save the modified product
      const savedItem = <IProduct>await repository.save(<RedisEntity>product);

      retItem = {
        sku: savedItem.sku,
        name: savedItem.name,
        type: savedItem.type,
        totalQuantity: savedItem.totalQuantity,
      };
    } else {
      throw `Product with Id ${_productId} not found`;
    }
  } else {
    throw `Input params failed !`;
  }

  return retItem;
}

    IncrementSKU

    The code that follows shows an example API request and response for incrementSKU activity.

    incrementSKU API Request

    POST http://localhost:3000/api/incrementSKU
{
  "sku": 1019688,
  "quantity": 2
}

    incrementSKU API Response

{
  "data": {
    "sku": 1019688,
    "name": "5-Year Protection Plan - Geek Squad",
    "type": "BlackTie",
    "totalQuantity": 12 // previous value 10
  },
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling an incrementSKU function which looks as follows:

    src/inventory-service.ts
static async incrementSKU(_productId: number, _incrQuantity: number, _isDecrement: boolean, _isReturnProduct: boolean): Promise<IProduct> {
  /**
  Increment quantity of a Product.

  :param _productId: Product Id
  :param _incrQuantity: quantity to increment by
  :param _isDecrement: if true, applies the increment as a negative value
  :param _isReturnProduct: if true, fetches and returns the updated product
  :return: Product with Quantity
  */

  const redisOmClient = getRedisOmClient();
  let retItem: IProduct = {};

  if (!_incrQuantity) {
    _incrQuantity = 1;
  }
  if (_isDecrement) {
    _incrQuantity = _incrQuantity * -1;
  }
  if (redisOmClient && _productId && _incrQuantity) {

    const updateKey = `${ProductRepo.PRODUCT_KEY_PREFIX}:${_productId}`;

    //atomically increment the JSON number field by a specific (positive/negative) value
    await redisOmClient.redis?.json.numIncrBy(updateKey, '$.totalQuantity', _incrQuantity);

    if (_isReturnProduct) {
      retItem = await InventoryServiceCls.retrieveSKU(_productId);
    }
  }
  else {
    throw `Input params failed !`;
  }

  return retItem;
}
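
The numIncrBy call above maps to the JSON.NUMINCRBY command, which applies the increment atomically inside Redis, so concurrent updates to the same SKU don't race. A minimal CLI sketch of the equivalent operation (the Product key prefix is an assumption based on the PRODUCT_KEY_PREFIX used above):

>> JSON.NUMINCRBY Product:1019688 $.totalQuantity 2
"[12]"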

    DecrementSKU

    The code that follows shows an example API request and response for decrementSKU activity.

    decrementSKU API Request

    POST http://localhost:3000/api/decrementSKU
{
  "sku": 1019688,
  "quantity": 4
}

    decrementSKU API Response

{
  "data": {
    "sku": 1019688,
    "name": "5-Year Protection Plan - Geek Squad",
    "type": "BlackTie",
    "totalQuantity": 16 //previous value 20
  },
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling a decrementSKU function which looks as follows:

    src/inventory-service.ts
static async decrementSKU(_productId: number, _decrQuantity: number): Promise<IProduct> {
  /**
  Decrement quantity of a Product.

  :param _productId: Product Id
  :param _decrQuantity: quantity to decrement by
  :return: Product with Quantity
  */
  let retItem: IProduct = {};

  //validate that the product is in stock
  let isValid = await InventoryServiceCls.validateQuantityOnDecrementSKU(_productId, _decrQuantity);

  if (isValid) {
    const isDecrement = true; //increments with a negative value
    const isReturnProduct = true;
    retItem = await InventoryServiceCls.incrementSKU(_productId, _decrQuantity, isDecrement, isReturnProduct);
  }

  return retItem;
}

static async validateQuantityOnDecrementSKU(_productId: number, _decrQuantity?: number): Promise<boolean> {
  let isValid = false;

  if (!_decrQuantity) {
    _decrQuantity = 1;
  }

  if (_productId) {
    const product = await InventoryServiceCls.retrieveSKU(_productId);
    if (product && product.totalQuantity && product.totalQuantity > 0
      && (product.totalQuantity - _decrQuantity >= 0)) {
      isValid = true;
    }
    else {
      throw `For product with Id ${_productId}, available quantity (${product.totalQuantity}) is less than the decrement quantity (${_decrQuantity})`;
    }
  }
  return isValid;
}

    RetrieveManySKUs

    The code that follows shows an example API request and response for retrieveManySKUs activity.

    retrieveManySKUs API Request

    POST http://localhost:3000/api/retrieveManySKUs
[{
  "sku": 1019688
}, {
  "sku": 1003622
}, {
  "sku": 1006702
}]

    retrieveManySKUs API Response

{
  "data": [
    {
      "sku": 1019688,
      "name": "5-Year Protection Plan - Geek Squad",
      "type": "BlackTie",
      "totalQuantity": 24
    },
    {
      "sku": 1003622,
      "name": "Aquarius - Fender Stratocaster 1,000-Piece Jigsaw Puzzle - Black/Red/White/Yellow/Green/Orange/Blue",
      "type": "HardGood",
      "totalQuantity": 10
    },
    {
      "sku": 1006702,
      "name": "Clash of the Titans [DVD] [2010]",
      "type": "Movie",
      "totalQuantity": 10
    }
  ],
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling a retrieveManySKUs function which looks as follows:

    src/inventory-service.ts
static async retrieveManySKUs(_productWithIds: IProductBodyFilter[]): Promise<IProduct[]> {
  /**
  Get current Quantity of specific Products.

  :param _productWithIds: Product list with Id
  :return: Product list
  */
  const repository = ProductRepo.getRepository();
  let retItems: IProduct[] = [];

  if (repository && _productWithIds && _productWithIds.length) {

    //string id array
    const idArr = _productWithIds.map((product) => {
      return product.sku?.toString() || "";
    });

    //fetch products by IDs (using redis om library)
    const result = await repository.fetch(...idArr);

    //fetch returns a single entity for one ID and an array for many
    let productsArr: IProduct[] = [];

    if (idArr.length == 1) {
      productsArr = [<IProduct>result];
    }
    else {
      productsArr = <IProduct[]>result;
    }

    if (productsArr && productsArr.length) {
      retItems = productsArr.map((product) => {
        return {
          sku: product.sku,
          name: product.name,
          type: product.type,
          totalQuantity: product.totalQuantity
        };
      });
    }
    else {
      throw `No products found !`;
    }
  }
  else {
    throw `Input params failed !`;
  }

  return retItems;
}

    DecrementManySKUs

    The code that follows shows an example API request and response for decrementManySKUs activity.

    decrementManySKUs API Request

    POST http://localhost:3000/api/decrementManySKUs
[{
  "sku": 1019688,
  "quantity": 4
}, {
  "sku": 1003622,
  "quantity": 2
}, {
  "sku": 1006702,
  "quantity": 2
}]

    decrementManySKUs API Response

{
  "data": [
    {
      "sku": 1019688,
      "name": "5-Year Protection Plan - Geek Squad",
      "type": "BlackTie",
      "totalQuantity": 28 //previous value 32
    },
    {
      "sku": 1003622,
      "name": "Aquarius - Fender Stratocaster 1,000-Piece Jigsaw Puzzle - Black/Red/White/Yellow/Green/Orange/Blue",
      "type": "HardGood",
      "totalQuantity": 8 //previous value 10
    },
    {
      "sku": 1006702,
      "name": "Clash of the Titans [DVD] [2010]",
      "type": "Movie",
      "totalQuantity": 8 //previous value 10
    }
  ],
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling a decrementManySKUs function which looks as follows:

    src/inventory-service.ts
static async decrementManySKUs(_productsFilter: IProductBodyFilter[]): Promise<IProduct[]> {
  /**
  Decrement quantity of specific Products.

  :param _productsFilter: Product list with Id and quantity
  :return: Product list
  */
  let retItems: IProduct[] = [];

  if (_productsFilter && _productsFilter.length) {
    //validation only
    const promArr: Promise<boolean>[] = [];
    for (let p of _productsFilter) {
      if (p.sku) {
        //validate that all products are in stock
        const promObj = InventoryServiceCls.validateQuantityOnDecrementSKU(p.sku, p.quantity);
        promArr.push(promObj);
      }
    }
    await Promise.all(promArr);

    //decrement only
    const promArr2: Promise<IProduct>[] = [];
    for (let p of _productsFilter) {
      if (p.sku && p.quantity) {
        const isDecrement = true; //increments with a negative value
        const isReturnProduct = false;
        const promObj2 = InventoryServiceCls.incrementSKU(p.sku, p.quantity, isDecrement, isReturnProduct);
        promArr2.push(promObj2);
      }
    }
    await Promise.all(promArr2);

    //retrieve updated products
    retItems = await InventoryServiceCls.retrieveManySKUs(_productsFilter);
  }
  else {
    throw `Input params failed !`;
  }

  return retItems;
}

Ready to use Redis in a real-time inventory system?

Hopefully, this tutorial has helped you visualize how to use Redis in a real-time inventory system for product availability across different store locations. For additional resources related to this topic, check out the links below:

    Additional resources


    Real-time Local Inventory Search Using Redis


    Profile picture for Prasan Kumar
    Author:
    Prasan Kumar, Technical Solutions Developer at Redis
    Profile picture for Will Johnston
    Author:
    Will Johnston, Developer Growth Manager at Redis

    GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

    git clone https://github.com/redis-developer/redis-real-time-inventory-solutions

Real-time local inventory search applies advanced product search across a group of stores or warehouses in a region or geographic area. With it, a retailer can enhance the customer experience with a localized view of inventory while fulfilling orders from the closest store possible.

Geospatial search of merchandise local to the consumer helps sell stock faster, lowers inventory levels, and thus increases the inventory turnover ratio. Consumers locate a product online, place the order in their browser or mobile device, and pick it up at the nearest store location. This is called “buy-online-pickup-in-store” (BOPIS).

Current challenges in real-time inventory

    • Over and under-stocking: While adopting a multi-channel business model (online & in store), lack of inventory visibility results in over and under-stocking of inventory in different regions and stores.

    • Consumers seek convenience: The ability to search across regional store locations and pickup merchandise immediately rather than wait for shipping is a key differentiator for retailers.

    • Consumers seek speed: All retailers, even small or family-run, must compete against the customer experience of large online retailers like Alibaba, FlipKart, Shopee, and Amazon.

    • High inventory costs: Retailers seek to lower inventory costs by eliminating missed sales from out-of-stock scenarios which also leads to higher “inventory turnover ratios.”

• Brand value: Inaccurate store inventory counts lead to frustrated customers and lower sales, and the resulting operational pain damages the brand.

How Redis helps

• Accurate location/regional inventory search: Redis Enterprise geospatial search capabilities enable retailers to provide local inventories by store location across geographies and regions based on a consumer's location. This enables a real-time view of store inventory and a seamless BOPIS shopping experience.

    • Consistent and accurate inventory view across multichannel and omnichannel experiences: Accurate inventory information no matter what channel the shopper is using, in-store, kiosk, online, or mobile. Redis Enterprise provides a single source of truth for inventory information across all channels.

    • Real-time search performance at scale: Redis Enterprise real-time search and query engine allows retailers to provide instant application and inventory search responses and scale performance effortlessly during peak periods.

    Real-time local inventory search with Redis

    local-search

    Redis provides geospatial search capabilities across a group of stores or warehouses in a region or geographic area allowing a retailer to quickly show the available inventory local to the customer.

    Redis Enterprise processes event streams, keeping store inventories up-to-date in real-time. This enhances the customer experience with localized, accurate search of inventory while fulfilling orders from the nearest and fewest stores possible.

    This solution lowers days sales of inventory (DSI), selling inventory faster and carrying less inventory for increased revenue generation and profits over a shorter time period.

It also reduces fulfillment costs to homes and local stores, enhancing a retailer's ability to fulfill orders with the lowest delivery and shipping costs.

    Customer proof points

Building real-time local inventory search with Redis

    GITHUB CODE

Below is a command to clone the source code for the application used in this tutorial:

    git clone https://github.com/redis-developer/redis-real-time-inventory-solutions

    Setting up the data

Once the application source code is downloaded, run the following commands to populate data in Redis:

    # install packages
    npm install

    # Seed data to Redis
    npm run seed
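
After seeding, you can sanity-check the data from redis-cli or the RedisInsight workbench. A minimal sketch (the Product key prefix and the SKU are assumptions based on the service code and sample data in this tutorial):

>> JSON.GET Product:1019688 $.name
"[\"5-Year Protection Plan - Geek Squad\"]"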

    The demo uses two collections:

• Product collection: Stores product details like productId, name, price, image, and other product data.
    tip

    Download RedisInsight to view your Redis data or to play with raw Redis commands in the workbench.

    • StoresInventory collection: Stores product quantity available at different local stores.

For demo purposes, we use the regions below in New York, US as store locations. Products are mapped to these stores with a storeId and quantity.

Regions in New York State

    inventory data

    Let's build the following APIs to demonstrate geospatial search using Redis:

• InventorySearch API: Search for products in local stores within a search radius.
• InventorySearchWithDistance API: Search for products in local stores within a search radius and sort the results by distance from the user's location to each store.

    InventorySearch API

    The code that follows shows an example API request and response for the inventorySearch API:

    inventorySearch API Request

    POST http://localhost:3000/api/inventorySearch
{
  "sku": 1019688,
  "searchRadiusInKm": 500,
  "userLocation": {
    "latitude": 42.880230,
    "longitude": -78.878738
  }
}

    inventorySearch API Response

{
  "data": [
    {
      "storeId": "02_NY_ROCHESTER",
      "storeLocation": {
        "longitude": -77.608849,
        "latitude": 43.156578
      },
      "sku": 1019688,
      "quantity": 38
    },
    {
      "storeId": "05_NY_WATERTOWN",
      "storeLocation": {
        "longitude": -75.910759,
        "latitude": 43.974785
      },
      "sku": 1019688,
      "quantity": 31
    },
    {
      "storeId": "10_NY_POUGHKEEPSIE",
      "storeLocation": {
        "longitude": -73.923912,
        "latitude": 41.70829
      },
      "sku": 1019688,
      "quantity": 45
    }
  ],
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling an inventorySearch function which looks as follows:

    src/inventory-service.ts
/**
 * Search Product in stores within search radius.
 *
 * :param _inventoryFilter: Product Id (sku), searchRadiusInKm and current userLocation
 * :return: Inventory product list
 */
static async inventorySearch(_inventoryFilter: IInventoryBodyFilter): Promise<IStoresInventory[]> {
  const nodeRedisClient = getNodeRedisClient();

  const repository = StoresInventoryRepo.getRepository();
  let retItems: IStoresInventory[] = [];

  if (nodeRedisClient && repository && _inventoryFilter?.sku
    && _inventoryFilter?.userLocation?.latitude
    && _inventoryFilter?.userLocation?.longitude) {

    const lat = _inventoryFilter.userLocation.latitude;
    const long = _inventoryFilter.userLocation.longitude;
    const radiusInKm = _inventoryFilter.searchRadiusInKm || 1000;

    const queryBuilder = repository.search()
      .where('sku')
      .eq(_inventoryFilter.sku)
      .and('quantity')
      .gt(0)
      .and('storeLocation')
      .inRadius((circle) => {
        return circle
          .latitude(lat)
          .longitude(long)
          .radius(radiusInKm)
          .kilometers;
      });

    console.log(queryBuilder.query);
    /* Sample queryBuilder query
      ( ( (@sku:[1019688 1019688]) (@quantity:[(0 +inf]) ) (@storeLocation:[-78.878738 42.88023 500 km]) )
    */

    retItems = <IStoresInventory[]>await queryBuilder.return.all();

    /* Sample command to run the query directly in the CLI
      FT.SEARCH StoresInventory:index '( ( (@sku:[1019688 1019688]) (@quantity:[(0 +inf]) ) (@storeLocation:[-78.878738 42.88023 500 km]) )'
    */

    if (!retItems.length) {
      throw `Product not found within ${radiusInKm}km range!`;
    }
  }
  else {
    throw `Input params failed !`;
  }
  return retItems;
}

    InventorySearchWithDistance API

    The code that follows shows an example API request and response for inventorySearchWithDistance API:

    inventorySearchWithDistance API Request

    POST http://localhost:3000/api/inventorySearchWithDistance
{
  "sku": 1019688,
  "searchRadiusInKm": 500,
  "userLocation": {
    "latitude": 42.88023,
    "longitude": -78.878738
  }
}

    inventorySearchWithDistance API Response

{
  "data": [
    {
      "storeId": "02_NY_ROCHESTER",
      "storeLocation": {
        "longitude": -77.608849,
        "latitude": 43.156578
      },
      "sku": "1019688",
      "quantity": "38",
      "distInKm": "107.74513"
    },
    {
      "storeId": "05_NY_WATERTOWN",
      "storeLocation": {
        "longitude": -75.910759,
        "latitude": 43.974785
      },
      "sku": "1019688",
      "quantity": "31",
      "distInKm": "268.86249"
    },
    {
      "storeId": "10_NY_POUGHKEEPSIE",
      "storeLocation": {
        "longitude": -73.923912,
        "latitude": 41.70829
      },
      "sku": "1019688",
      "quantity": "45",
      "distInKm": "427.90787"
    }
  ],
  "error": null
}

    When you make a request, it goes through the API gateway to the inventory service. Ultimately, it ends up calling an inventorySearchWithDistance function which looks as follows:

    src/inventory-service.ts
/**
 * Search Product in stores within search radius, and sort results by distance from the current user location to the store.
 *
 * :param _inventoryFilter: Product Id (sku), searchRadiusInKm and current userLocation
 * :return: Inventory product list
 */
static async inventorySearchWithDistance(_inventoryFilter: IInventoryBodyFilter): Promise<IStoresInventory[]> {
  const nodeRedisClient = getNodeRedisClient();

  const repository = StoresInventoryRepo.getRepository();
  let retItems: IStoresInventory[] = [];

  if (nodeRedisClient && repository && _inventoryFilter?.sku
    && _inventoryFilter?.userLocation?.latitude
    && _inventoryFilter?.userLocation?.longitude) {

    const lat = _inventoryFilter.userLocation.latitude;
    const long = _inventoryFilter.userLocation.longitude;
    const radiusInKm = _inventoryFilter.searchRadiusInKm || 1000;

    const queryBuilder = repository.search()
      .where('sku')
      .eq(_inventoryFilter.sku)
      .and('quantity')
      .gt(0)
      .and('storeLocation')
      .inRadius((circle) => {
        return circle
          .latitude(lat)
          .longitude(long)
          .radius(radiusInKm)
          .kilometers;
      });

    console.log(queryBuilder.query);
    /* Sample queryBuilder query
      ( ( (@sku:[1019688 1019688]) (@quantity:[(0 +inf]) ) (@storeLocation:[-78.878738 42.88023 500 km]) )
    */

    const indexName = `${StoresInventoryRepo.STORES_INVENTORY_KEY_PREFIX}:index`;
    const aggregator = await nodeRedisClient.ft.aggregate(
      indexName,
      queryBuilder.query,
      {
        LOAD: ["@storeId", "@storeLocation", "@sku", "@quantity"],
        STEPS: [{
          type: AggregateSteps.APPLY,
          //geodistance() returns meters, so divide by 1000 for kilometers
          expression: `geodistance(@storeLocation, ${long}, ${lat})/1000`,
          AS: 'distInKm'
        }, {
          type: AggregateSteps.SORTBY,
          BY: "@distInKm"
        }]
      });

    /* Sample command to run the query directly in the CLI
      FT.AGGREGATE StoresInventory:index '( ( (@sku:[1019688 1019688]) (@quantity:[(0 +inf]) ) (@storeLocation:[-78.878738 42.88023 500 km]) )' LOAD 4 @storeId @storeLocation @sku @quantity APPLY "geodistance(@storeLocation,-78.878738,42.88023)/1000" AS distInKm SORTBY 1 @distInKm
    */

    retItems = <IStoresInventory[]>aggregator.results;

    if (!retItems.length) {
      throw `Product not found within ${radiusInKm}km range!`;
    }
    else {
      //storeLocation comes back as a "longitude,latitude" string; convert it to an object
      retItems = retItems.map((item) => {
        if (typeof item.storeLocation == "string") {
          const location = item.storeLocation.split(",");
          item.storeLocation = {
            longitude: Number(location[0]),
            latitude: Number(location[1]),
          };
        }
        return item;
      });
    }
  }
  else {
    throw `Input params failed !`;
  }
  return retItems;
}

    Hopefully this tutorial has helped you visualize how to use Redis for real-time local inventory search across different regional stores. For additional resources related to this topic, check out the links below:

    Additional resources

How to Perform Vector Similarity Search Using Redis in NodeJS

Inner Product: this metric computes the dot product of two vectors in a given vector space. Higher values indicate higher similarity. However, the raw values can be large for long vectors; hence, normalization is recommended for better interpretation. If the vectors are normalized, their dot product will be 1 if they are identical and 0 if they are orthogonal (uncorrelated).

    Considering our product 1 and product 2, let's compute the Inner Product across all features.

    sample
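
The figure above carries the actual feature values; as a minimal sketch with hypothetical vectors, the Inner Product is just a sum of element-wise products:

// Minimal sketch: inner (dot) product of two product feature vectors.
// The feature values below are hypothetical; the real ones appear in the figure above.
const product1Features = [0.9, 0.45, 0.3];
const product2Features = [0.8, 0.5, 0.2];

const innerProduct = product1Features.reduce(
  (sum, value, i) => sum + value * product2Features[i],
  0,
);

console.log(innerProduct); // 1.005 for these sample values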

    tip

    Vectors can also be stored in databases in binary formats to save space. In practical applications, it's crucial to strike a balance between the dimensionality of the vectors (which impacts storage and computational costs) and the quality or granularity of the information they capture.

    Further reading


    Probabilistic Data Structures

Redis Stack provides additional probabilistic data structures. These allow you to solve computer science problems in constant memory space, with extremely fast processing and a low error rate. Redis Stack supports scalable Bloom and Cuckoo filters to determine (with a specified degree of certainty) whether an item is present in or absent from a collection.

    The four probabilistic data types:

    • Bloom filter: A probabilistic data structure that can test for presence. A Bloom filter is a data structure designed to tell you, rapidly and memory-efficiently, whether an element is present in a set. Bloom filters typically exhibit better performance and scalability when inserting items (so if you're often adding items to your dataset then Bloom may be ideal).
• Cuckoo filter: An alternative to Bloom filters, Cuckoo filters come with additional support for deleting elements from a set. These filters are quicker on check operations.
    • Count-min sketch: A count-min sketch is generally used to determine the frequency of events in a stream. You can query the count-min sketch to get an estimate of the frequency of any given event.
• Top-K: The Top-K probabilistic data structure approximates frequencies for the top k items. With Top-K, you’ll be notified in real time whenever elements enter into or are expelled from your Top-K list. If an added element enters the list, the element it displaced (if any) is returned. (See the quick command sketch after this list.)
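
As a quick taste of the count-min sketch and Top-K commands described above, here is a minimal sketch (the key names and parameters are made up for illustration):

>> CMS.INITBYPROB page_views 0.001 0.01
OK
>> CMS.INCRBY page_views /home 5
1) (integer) 5
>> TOPK.RESERVE top_products 3
OK
>> TOPK.ADD top_products sku:1019688
1) (nil)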

    Step 1. Register and subscribe

    Follow this link to register and subscribe to Redis Enterprise Cloud

    Redisbloom

    Step 2. Create a database with Redis Stack

    Redisbloom

    Step 3. Connect to a database

    Follow this link to know how to connect to a database

    Step 4. Getting Started with Probabilistic Data Structures

    In the next steps you will use some basic commands. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight. (See part 2 of this tutorial to learn more about using the RedisInsight CLI.) To interact with Redis, you use the BF.ADD and BF.EXISTS commands.

    Let’s go ahead and test drive some probabilistic-specific operations. We will create a basic dataset based on unique visitors’ IP addresses, and you will see how to:

    • Create a Bloom filter
    • Determine whether or not an item exists in the Bloom filter
    • Add one or more items to the Bloom filter
    • Determine whether or not a unique visitor’s IP address exists

    Let’s walk through the process step-by-step:

    Create a Bloom filter

    Use the BF.ADD command to add a unique visitor IP address to the Bloom filter as shown here:

    >> BF.ADD unique_visitors 10.94.214.120
    (integer) 1
    (1.75s)
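
BF.ADD creates the filter automatically with default parameters if it doesn't already exist. If you'd rather size it yourself, you can create it explicitly with BF.RESERVE before adding any items; a minimal sketch (the 1% error rate and 1,000-item capacity are illustrative):

>> BF.RESERVE unique_visitors 0.01 1000
OK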

    Determine whether or not an item exists

    Use the BF.EXISTS command to determine whether or not an item may exist in the Bloom filter:

    >> BF.EXISTS unique_visitors 10.94.214.120
    (integer) 1
    >> BF.EXISTS unique_visitors 10.94.214.121
    (integer) 0
    (1.46s)

In the above example, the first command returns “1”, indicating that the item may exist, whereas the second command returns “0”, indicating that the item certainly does not exist.

    Add one or more items to the Bloom filter

    Use the BF.MADD command to add one or more items to the Bloom filter, creating the filter if it does not yet exist. This command operates identically to BF.ADD, except it allows multiple inputs and returns multiple values:

    >> BF.MADD unique_visitors 10.94.214.100 10.94.214.200 10.94.214.210 10.94.214.212
    1) (integer) 1
    2) (integer) 1
    3) (integer) 1
    4) (integer) 1

    As shown above, the BF.MADD allows you to add one or more visitors’ IP addresses to the Bloom filter.

    Determine whether or not a unique visitor’s IP address exists

    Use BF.MEXISTS to determine if one or more items may exist in the filter or not:

    >> BF.MEXISTS unique_visitors 10.94.214.200 10.94.214.212
    1) (integer) 1
    2) (integer) 1
>> BF.MEXISTS unique_visitors 10.94.214.200 10.94.214.213
    1) (integer) 1
    2) (integer) 0

In the above example, the first command returns “1” for both visitors’ IP addresses, indicating that these items may exist. The second command returns “0” for one of the IP addresses, indicating that the item certainly does not exist.

    Next Steps

    • Learn more about Probabilistic data in the Quick Start tutorial.
Redis Search

Before running queries on our new index, though, let’s take a closer look at the elements of the FT.CREATE command (a sketch of the full command follows this list):

    • idx:movies: the name of the index, which you will use when doing queries
    • ON hash: the type of structure to be indexed. (Note that Redis Search 2.0 supports only the Hash structure, but this parameter will allow Redis Search to index other structures in the future.)
    • PREFIX 1 “movies:”: the prefix of the keys that should be indexed. This is a list, so since we want to index only movies:* keys the number is 1. If you want to index movies and TV shows with the same fields, you could use: PREFIX 2 “movies:” “tv_show:”
    • SCHEMA …: defines the schema, the fields, and their type to index. As you can see in the command, we are using TEXT, NUMERIC, and TAG, as well as SORTABLE parameters.
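
Putting those elements together gives a command along the following lines (a sketch assembled from the elements above; the per-field types and SORTABLE options are assumptions):

FT.CREATE idx:movies ON hash PREFIX 1 "movies:" SCHEMA title TEXT SORTABLE release_year NUMERIC SORTABLE rating NUMERIC SORTABLE genre TAG SORTABLE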

The Redis Search 2.0 engine scans the database using the PREFIX values and updates the index based on the schema definition. This makes it easy to add an index to an existing application that uses Hashes; there’s no need to change your code.

    Search the movies in the Redis Search index

    You can now use the FT.SEARCH to search your database, for example, to search all movies sorted by release year:

    >  FT.SEARCH idx:movies * SORTBY release_year ASC RETURN 2 title release_year
1) (integer) 2
2) "movies:1003"
3) 1) "release_year"
   2) "1972"
   3) "title"
   4) "The Godfather"
4) "movies:1002"
5) 1) "release_year"
   2) "1980"
   3) "title"
   4) "Star Wars: Episode V - The Empire Strikes Back"

    You can also search “action” movies that contain “star” in the index (in our sample index, the term “star” will occur only in the title):

    >  FT.SEARCH idx:movies "star @genre:{action}" RETURN 2 title release_year
1) (integer) 1
2) "movies:1002"
3) 1) "title"
   2) "Star Wars: Episode V - The Empire Strikes Back"
   3) "release_year"
   4) "1980"

The FT.SEARCH command is the base command to search your database. It has many options and is associated with a powerful, rich query syntax that you can find in the documentation.

    tip

    You can also use the index to do data aggregation using the FT.AGGREGATE command.
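
For instance, a minimal aggregation that counts movies per genre could look like the following sketch (the GROUPBY and REDUCE clauses are illustrative):

>  FT.AGGREGATE idx:movies "*" GROUPBY 1 @genre REDUCE COUNT 0 AS nb_of_movies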

    Next Steps


    RedisGraph

    End-of-Life Notice

    Redis is phasing out RedisGraph. This blog post explains the motivation behind this decision and the implications for existing Redis customers and community members.

    End of support is scheduled for January 31, 2025.

    Beginning with Redis Stack 7.2.x-y, Redis Stack will no longer include graph capabilities (RedisGraph).

RedisGraph is a Redis module that enables enterprises to process any kind of connected data much faster than with traditional relational or existing graph databases. RedisGraph implements a unique data storage and processing solution (with sparse-adjacency matrices and GraphBLAS) to deliver the fastest and most efficient way to store, manage, and process connected data in graphs. With RedisGraph, you can process complex transactions 10 - 600 times faster than with traditional graph solutions while using 50 - 60% less memory than other graph databases!

    Step 1. Register and subscribe

    Follow this link to register and subscribe to Redis Enterprise Cloud

    RedisGraph

    Step 2. Create a database with RedisGraph Module

    RedisGraph

    Step 3. Connect to a database

    Follow this link to know how to connect to a database

    Step 4. Getting Started with RedisGraph

In the following steps, we will use some basic RedisGraph commands to insert data into a graph and then query the graph. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight. (See part 2 of this tutorial to learn more about using the RedisInsight CLI.)

    RedisGraph

    Step 5: Insert data into a graph

    Insert actors

To interact with RedisGraph you will typically use the GRAPH.QUERY command and execute Cypher queries. Let’s start by inserting some actors into the graph:movies graph, which is created automatically by this command:

    >> GRAPH.QUERY graph:movies "CREATE (:Actor {name:'Mark Hamill', actor_id:1}), (:Actor {name:'Harrison Ford', actor_id:2}), (:Actor {name:'Carrie Fisher', actor_id:3})"

1) 1) "Labels added: 1"
   2) "Nodes created: 3"
   3) "Properties set: 6"
   4) "Query internal execution time: 0.675400 milliseconds"

    This single query creates three actors, along with their names and unique IDs.

    Insert a movie

    > GRAPH.QUERY graph:movies "CREATE (:Movie {title:'Star Wars: Episode V - The Empire Strikes Back', release_year: 1980 , movie_id:1})"
1) 1) "Labels added: 1"
   2) "Nodes created: 1"
   3) "Properties set: 3"
   4) "Query internal execution time: 0.392300 milliseconds"

    This single query creates a movie with a title, the release year, and an ID.

    Associate actors and movies

    The core of a graph is the relationships between the nodes, allowing the applications to navigate and query them. Let’s create a relationship between the actors and the movies:

    > GRAPH.QUERY graph:movies "MATCH (a:Actor),(m:Movie) WHERE a.actor_id = 1 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Luke Skywalker'}]->(m) RETURN r"
1) 1) "r"
2) 1) 1) 1) 1) "id"
            2) (integer) 1
         2) 1) "type"
            2) "Acted_in"
         3) 1) "src_node"
            2) (integer) 0
         4) 1) "dest_node"
            2) (integer) 3
         5) 1) "properties"
            2) 1) 1) "role"
                  2) "Luke Skywalker"
3) 1) "Properties set: 1"
   2) "Relationships created: 1"
   3) "Query internal execution time: 0.664800 milliseconds"

    This command created a new relation indicating that the actor Mark Hamill acted in Star Wars: Episode V as Luke Skywalker.

    Let’s repeat this process for the other actors:

    > GRAPH.QUERY graph:movies "MATCH (a:Actor), (m:Movie) WHERE a.actor_id = 2 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Han Solo'}]->(m) RETURN r"
    > GRAPH.QUERY graph:movies "MATCH (a:Actor), (m:Movie) WHERE a.actor_id = 3 AND m.movie_id = 1 CREATE (a)-[r:Acted_in {role:'Princess Leila'}]->(m) RETURN r"

    You can also do all of this in a single query, for example:

    > GRAPH.QUERY graph:movies "CREATE (:Actor {name:'Marlo Brando', actor_id:4})-[:Acted_in {role:'Don Vito Corleone'}]->(:Movie {title:'The Godfather', release_year: 1972 , movie_id:2})"

1) 1) "Nodes created: 2"
   2) "Properties set: 6"
   3) "Relationships created: 1"
   4) "Query internal execution time: 0.848500 milliseconds"

    Querying the graph

    Now that you have data in your graph, you’re ready to ask some questions, such as:

    “What are the titles of all the movies?”

    > GRAPH.QUERY graph:movies "MATCH (m:Movie) RETURN m.title"

1) 1) "m.title"
2) 1) 1) "Star Wars: Episode V - The Empire Strikes Back"
   2) 1) "The Godfather"
3) 1) "Query internal execution time: 0.349400 milliseconds"

    “What is the information for the movie with the ID of 1?”

    > GRAPH.QUERY graph:movies "MATCH (m:Movie) WHERE m.movie_id = 1 RETURN m"

1) 1) "m"
2) 1) 1) 1) 1) "id"
            2) (integer) 3
         2) 1) "labels"
            2) 1) "Movie"
         3) 1) "properties"
            2) 1) 1) "title"
                  2) "Star Wars: Episode V - The Empire Strikes Back"
               2) 1) "release_year"
                  2) (integer) 1980
               3) 1) "movie_id"
                  2) (integer) 1
3) 1) "Query internal execution time: 0.365800 milliseconds"

    “Who are the actors in the movie 'Star Wars: Episode V - The Empire Strikes Back' and what roles did they play?”

    > GRAPH.QUERY graph:movies "MATCH (a:Actor)-[r:Acted_in]-(m:Movie) WHERE m.movie_id = 1 RETURN a.name,m.title,r.role"
1) 1) "a.name"
   2) "m.title"
   3) "r.role"
2) 1) 1) "Mark Hamill"
      2) "Star Wars: Episode V - The Empire Strikes Back"
      3) "Luke Skywalker"
   2) 1) "Harrison Ford"
      2) "Star Wars: Episode V - The Empire Strikes Back"
      3) "Han Solo"
   3) 1) "Carrie Fisher"
      2) "Star Wars: Episode V - The Empire Strikes Back"
      3) "Princess Leila"
3) 1) "Query internal execution time: 0.641200 milliseconds"

    Visualizing graph databases with RedisInsight

    If you are using RedisInsight, you can visualize and navigate into the nodes and relationships graphically. Click on the RedisGraph menu entry on the left and enter the query:

    MATCH (m:Actor) return m

Click on the Execute button, and double-click on the actors to follow the relationships. You should see a graph like this one:

    RedisGraph

    Next Steps

    • Learn more about RedisGraph in the Quickstart tutorial.

    Redis JSON

    Redis Stack provides in-memory manipulation of JSON documents at high velocity and volume. With Redis Stack, you can natively store document data in a hierarchical, tree-like format to scale and query documents efficiently, significantly improving performance over storing and manipulating JSON with Lua scripts and core Redis data structures.

    Step 1. Register and subscribe

    Follow this link to register and subscribe to Redis Enterprise Cloud

    Redis JSON

    Step 2. Create a database with Redis JSON Module

    Redis JSON

    Step 3. Connect to a database

    Follow this link to know how to connect to a database

    Step 4. Getting Started with Redis JSON

    The following steps use some basic Redis JSON commands. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight.

    To interact with Redis JSON, you will most often use the JSON.SET and JSON.GET commands. Before using Redis JSON, you should familiarize yourself with its commands and syntax as detailed in the documentation: Redis JSON Commands.

    Let’s go ahead and test drive some JSON-specific operations for setting and retrieving a Redis key with a JSON value:

    • Scalar
    • Objects (including nested objects)
    • Arrays of JSON objects
    • JSON nested objects

    Scalar

Under Redis JSON, a key can contain any valid JSON value: a scalar, an object, or an array. A JSON scalar is a single value, such as a string. You use the JSON.SET command to set a JSON value. For new Redis keys the path must be the root, so you will use the “.” path in the example below. For existing keys, when the entire path exists, the value it contains is replaced with the JSON value. Here you will use JSON.SET to set the key scalar to the JSON string “Hello JSON!”:

    >> JSON.SET scalar .  ' "Hello JSON!" '
    "OK"

    Use JSON.GET to return the value at path in JSON serialized form:

    >> JSON.GET scalar
    "\"Hello JSON!\""

    Objects

    Let’s look at a JSON object example. A JSON object contains data in the form of a key-value pair. The keys are strings and the values are the JSON types. Keys and values are separated by a colon. Each entry (key-value pair) is separated by a comma. The { (curly brace) represents the JSON object:

{
  "employee": {
    "name": "alpha",
    "age": 40,
    "married": true
  }
}

    Here is the command to insert JSON data into Redis:

    >> JSON.SET employee_profile . '{ "employee": { "name": "alpha", "age": 40,"married": true }  } '
    "OK"

The subcommands below change the reply’s format and are all set to the empty string by default:

• INDENT sets the indentation string for nested levels.
• NEWLINE sets the string that’s printed at the end of each line.
• SPACE sets the string that’s put between a key and a value.

>> JSON.GET employee_profile
"{\"employee\":{\"name\":\"alpha\",\"age\":40,\"married\":true}}"

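For instance, a quick sketch of those formatting subcommands in action:

>> JSON.GET employee_profile INDENT "\t" NEWLINE "\n" SPACE " "
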
    Retrieving a part of JSON document

You can also retrieve a part of the JSON document from Redis. In the example below, the path “.ans” is passed on the command line to retrieve the value 4:

    >> JSON.SET object . '{"foo":"bar", "ans":"4" }'
    "OK"
    >> JSON.GET object
    "{\"foo\":\"bar\",\"ans\":\"4\"}"
    >> JSON.GET object .ans
    "\"4\""

    Retrieving the type of JSON data

JSON.TYPE reports the type of the JSON value at path; path defaults to the root if not provided. If the key or path does not exist, null is returned.

    >> JSON.TYPE employee_profile
    "Object"

    JSON arrays of objects

    The JSON array represents an ordered list of values. A JSON array can store multiple values, including strings, numbers, or objects. In JSON arrays, values must be separated by a comma. The [ (square bracket) represents the JSON array. Let’s look at a simple JSON array example with four objects:

{"employees": [
  {"name": "Alpha", "email": "alpha@gmail.com", "age": 23},
  {"name": "Beta", "email": "beta@gmail.com", "age": 28},
  {"name": "Gamma", "email": "gamma@gmail.com", "age": 33},
  {"name": "Theta", "email": "theta@gmail.com", "age": 41}
]}

    >> JSON.SET testarray . '{"employees":[ {"name":"Alpha", "email":"alpha@gmail.com", "age":23}, {"name":"Beta", "email":"beta@gmail.com", "age":28}, {"name":"Gamma", "email":"gamma@gmail.com", "age":33}, {"name":"Theta", "email":"theta@gmail.com", "age":41} ]} '
    "OK"

>> JSON.GET testarray
"{\"employees\":[{\"name\":\"Alpha\",\"email\":\"alpha@gmail.com\",\"age\":23},{\"name\":\"Beta\",\"email\":\"beta@gmail.com....

    JSON nested objects

    A JSON object can also have another object. Here is a simple example of a JSON object having another object nested in it:

>> JSON.SET employee_info . '{ "firstName": "Alpha", "lastName": "K", "age": 23, "address": { "streetAddress": "110 Fulbourn Road Cambridge", "city": "San Francisco", "state": "California", "postalCode": "94016" } }'
    "OK"
    >> JSON.GET employee_info
    "{\"firstName\":\"Alpha\",\"lastName\":\"K\",\"age\":23,\"address\":{\"streetAddress\":\"110 Fulbourn Road Cambridge\",\"city\":\"San Francisco\",\"state\":\"California\",\"postalCode\":\"94016\"}}"

    Next Steps


    Redis Time Series

Redis Time Series is a Redis module that enhances your experience managing time-series data with Redis. It simplifies the use of Redis for time-series use cases such as internet of things (IoT) data, stock prices, and telemetry. With Redis Time Series, you can ingest and query millions of samples and events at the speed of Redis. Advanced tooling such as downsampling and aggregation ensures a small memory footprint without impacting performance. Use a variety of queries for visualization and monitoring with built-in connectors to popular monitoring tools like Grafana, Prometheus, and Telegraf.

    Step 1. Register and subscribe

    Follow this link to register and subscribe to Redis Enterprise Cloud

    Redistimeseries

    Step 2. Create a database with Redis Time Series Module

    Redistimeseries

    Step 3. Connect to a database

    Follow this link to know how to connect to a database

    Step 4. Getting Started with Redis Time Series

    This section will walk you through using some basic RedisTimeseries commands. You can run them from the Redis command-line interface (redis-cli) or use the CLI available in RedisInsight. (See part 2 of this tutorial to learn more about using the RedisInsight CLI.) Using a basic air-quality dataset, we will show you how to:

    • Create a new time series
    • Add a new sample to the list of series
    • Query a range across one or multiple time series

    Redis Time Series

    Create a new time series

    Let’s create a time series representing air quality dataset measurements. To interact with Redis Time Series you will most often use the TS.RANGE command, but here you will create a time series per measurement using the TS.CREATE command. Once created, all the measurements will be sent using TS.ADD.

The sample commands below create three time series:

    >> TS.CREATE ts:carbon_monoxide
    >> TS.CREATE ts:relative_humidity
    >> TS.CREATE ts:temperature RETENTION 60 LABELS sensor_id 2 area_id 32

In the above example, ts:carbon_monoxide, ts:relative_humidity and ts:temperature are key names. ts:temperature is created with two labels (sensor_id and area_id are the fields with values 2 and 32, respectively) and a retention window of 60 milliseconds.

    Add new sample data to the time series

    Let’s add samples to the time series. If a key does not already exist, TS.ADD creates it automatically:

    >> TS.ADD ts:carbon_monoxide 1112596200 2.4
    >> TS.ADD ts:relative_humidity 1112596200 18.3
    >> TS.ADD ts:temperature 1112599800 28.3
    >> TS.ADD ts:carbon_monoxide 1112599800 2.1
    >> TS.ADD ts:relative_humidity 1112599800 13.5
    >> TS.ADD ts:temperature 1112603400 28.5
    >> TS.ADD ts:carbon_monoxide 1112603400 2.2
    >> TS.ADD ts:relative_humidity 1112603400 13.1
    >> TS.ADD ts:temperature 1112607000 28.7
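
    If you prefer to append several samples in a single round trip, TS.MADD accepts multiple key/timestamp/value triples. A hedged example continuing the dataset above (TS.MADD returns one timestamp per sample written):

    >> TS.MADD ts:carbon_monoxide 1112607000 2.0 ts:relative_humidity 1112607000 12.8
    1) (integer) 1112607000
    2) (integer) 1112607000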

    Querying the sample

    Now that you have sample data in your time series, you’re ready to ask questions such as:

    “How do I get the last sample?”

    TS.GET is used to get the last sample. The returned array will contain the last sample timestamp followed by the last sample value, when the time series contains data:

    >> TS.GET ts:temperature
    1) (integer) 1112607000
    2) "28.7"

    “How do I get the last sample matching the specific filter?”

    TS.MGET is used to get the last samples matching the specific filter:

    >> TS.MGET FILTER area_id=32
    1) 1) "ts:temperature"
       2) (empty list or set)
       3) 1) (integer) 1112607000
          2) "28.7"

    “How do I get the sample with labels matching the specific filter?”

    >> TS.MGET WITHLABELS FILTER area_id=32
    1) 1) "ts:temperature"
       2) 1) 1) "sensor_id"
             2) "2"
          2) 1) "area_id"
             2) "32"
       3) 1) (integer) 1112607000
          2) "28.7"

    Query a range across one or more time series

    TS.RANGE is used to query a range in the forward direction, while TS.REVRANGE queries a range in the reverse direction. They let you answer questions such as:

    “How do I get the sample for a time range?”

    >> TS.RANGE ts:carbon_monoxide 1112596200 1112603400
    1) 1) (integer) 1112596200
       2) "2.4"
    2) 1) (integer) 1112599800
       2) "2.1"
    3) 1) (integer) 1112603400
       2) "2.2"

    Aggregation

    You can use various aggregation types such as avg, sum, min, max, range, count, first, last, etc. The example below shows how to use the “avg” aggregation type to answer questions such as:

    “How do I get the sample for a time range on some aggregation rule?”

    >> TS.RANGE ts:carbon_monoxide 1112596200 1112603400 AGGREGATION avg 2
    1) 1) (integer) 1112596200
       2) "2.4"
    2) 1) (integer) 1112599800
       2) "2.1"
    3) 1) (integer) 1112603400
       2) "2.2"

    Next Steps

    • Learn more about Redis Time Series in the Quickstart tutorial.
    - + \ No newline at end of file diff --git a/operate/docker/nodejs-nginx-redis/index.html b/operate/docker/nodejs-nginx-redis/index.html index e475e6c4bb..d52a58c02c 100644 --- a/operate/docker/nodejs-nginx-redis/index.html +++ b/operate/docker/nodejs-nginx-redis/index.html @@ -4,7 +4,7 @@ How to build and run a Node.js application using Nginx, Docker and Redis | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    How to build and run a Node.js application using Nginx, Docker and Redis


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Thanks to Node.js, millions of frontend developers who write JavaScript for the browser are now able to write server-side code in addition to client-side code, without needing to learn a completely different language. Node.js is a free, open-source, cross-platform JavaScript runtime environment. It can handle thousands of concurrent connections on a single server without the burden of managing thread concurrency, which can be a significant source of bugs.

    Nginx-node

    In this quickstart guide, you will see how to build a Node.js application (visitor counter) using Nginx, Redis and Docker.

    What do you need?

    • Node.js: An open-source, cross-platform, back-end JavaScript runtime environment that runs on the V8 engine and executes JavaScript code outside a web browser.
    • Nginx: An open source software for web serving, reverse proxying, caching, load balancing, media streaming, and more.
    • Docker: a containerization platform for developing, shipping, and running applications.
    • Docker Compose: A tool for defining and running multi-container Docker applications.

    Project structure

    .
    ├── docker-compose.yml
    ├── redis
    ├── nginx
    │   ├── Dockerfile
    │   └── nginx.conf
    ├── web1
    │   ├── Dockerfile
    │   ├── package.json
    │   └── server.js
    └── web2
        ├── Dockerfile
        ├── package.json
        └── server.js

    Prerequisites:

    – Install Docker Desktop

    Use Docker's install guide to set up Docker Desktop for Mac or Windows on your local system.

    Docker Desktop

    info

    Docker Desktop comes with Docker Compose installed by default, so you don't need to install it separately.

    Step 1. Create a Docker compose file

    Create a file named "docker-compose.yml" with the following content:

    version: '3.9'
    services:
      redis:
        image: 'redis:alpine'
        ports:
          - '6379:6379'
      web1:
        restart: on-failure
        build: ./web
        hostname: web1
        ports:
          - '81:5000'
      web2:
        restart: on-failure
        build: ./web
        hostname: web2
        ports:
          - '82:5000'
      nginx:
        build: ./nginx
        ports:
          - '80:80'
        depends_on:
          - web1
          - web2

    The compose file defines an application with four services: redis, web1, web2 and nginx. When deploying the application, docker-compose maps port 80 of the nginx service container to port 80 of the host, as specified in the file.

    info

    By default, Redis runs on port 6379. Make sure no other instance of Redis is running on your system and that port 6379 on the host is not already used by another container; otherwise, change the port mapping.

    Step 2. Create an nginx directory and add the following files:

    File: nginx/nginx.conf

    upstream loadbalancer {
      server web1:5000;
      server web2:5000;
    }

    server {
      listen 80;
      server_name localhost;
      location / {
        proxy_pass http://loadbalancer;
      }
    }

    File: nginx/Dockerfile

    FROM nginx:1.21.6
    RUN rm /etc/nginx/conf.d/default.conf
    COPY nginx.conf /etc/nginx/conf.d/default.conf

    Step 3. Create a web directory and add the following files:

    File: web/Dockerfile

    FROM node:14.17.3-alpine3.14

    WORKDIR /usr/src/app

    COPY ./package.json ./
    RUN npm install
    COPY ./server.js ./

    CMD ["npm","start"]

    File: web/package.json

    {
      "name": "web",
      "version": "1.0.0",
      "description": "Running Node.js and Express.js on Docker",
      "main": "server.js",
      "scripts": {
        "start": "node server.js"
      },
      "dependencies": {
        "express": "^4.17.2",
        "redis": "3.1.2"
      },
      "author": "",
      "license": "MIT"
    }

    File: web/server.js

    const express = require('express');
    const redis = require('redis');
    const app = express();

    // Connect to the Redis service defined in docker-compose.yml
    const redisClient = redis.createClient({
      host: 'redis',
      port: 6379,
    });

    app.get('/', function (req, res) {
      redisClient.get('numVisits', function (err, numVisits) {
        // numVisits is null on the first visit, so default the counter to 1
        let numVisitsToDisplay = parseInt(numVisits) + 1;
        if (isNaN(numVisitsToDisplay)) {
          numVisitsToDisplay = 1;
        }
        res.send('Number of visits is: ' + numVisitsToDisplay);
        // Persist the incremented counter back to Redis
        redisClient.set('numVisits', numVisitsToDisplay);
      });
    });

    app.listen(5000, function () {
      console.log('Web application is listening on port 5000');
    });

    Step 4. Deploy the application

    Let us deploy the full-fledged app using docker-compose:

    $ docker-compose up -d
    Creating nginx-nodejs-redis_redis_1 ... done
    Creating nginx-nodejs-redis_web1_1 ... done
    Creating nginx-nodejs-redis_web2_1 ... done
    Creating nginx-nodejs-redis_nginx_1 ... done

    Expected result

    List the running containers. You should see four containers running, with the port mappings shown below:

    docker-compose ps
                Name                        Command             State          Ports
    ------------------------------------------------------------------------------------------
    nginx-nodejs-redis_nginx_1   /docker-entrypoint.sh ngin ...   Up   0.0.0.0:80->80/tcp
    nginx-nodejs-redis_redis_1   docker-entrypoint.sh redis ...   Up   0.0.0.0:6379->6379/tcp
    nginx-nodejs-redis_web1_1    docker-entrypoint.sh npm start   Up   0.0.0.0:81->5000/tcp
    nginx-nodejs-redis_web2_1    docker-entrypoint.sh npm start   Up   0.0.0.0:82->5000/tcp

    Step 5. Testing the app

    After the application starts, navigate to http://localhost in your web browser or run:

    $ curl localhost:80
    web1: Total number of visits is: 1
    $ curl localhost:80
    web1: Total number of visits is: 2
    $ curl localhost:80
    web2: Total number of visits is: 3
    $ curl localhost:80
    web2: Total number of visits is: 4
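
    To watch the Nginx round-robin balancing in action from the shell, you can issue several requests in a loop (a small convenience sketch; the responses alternate between web1 and web2 as above):

    $ for i in $(seq 1 4); do curl localhost:80; echo; done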

    Step 6. Monitoring Redis keys

    If you want to monitor the Redis keys, you can use the MONITOR command. Install redis-cli on your Mac system using brew install redis, and then connect directly to the Redis container by issuing the following command:

    % redis-cli
    127.0.0.1:6379> monitor
    OK
    1646485507.290868 [0 172.24.0.2:34330] "get" "numVisits"
    1646485507.309070 [0 172.24.0.2:34330] "set" "numVisits" "5"
    1646485509.228084 [0 172.24.0.2:34330] "get" "numVisits"
    1646485509.241762 [0 172.24.0.2:34330] "set" "numVisits" "6"
    1646485509.619369 [0 172.24.0.4:52082] "get" "numVisits"
    1646485509.629739 [0 172.24.0.4:52082] "set" "numVisits" "7"
    1646485509.990926 [0 172.24.0.2:34330] "get" "numVisits"
    1646485509.999947 [0 172.24.0.2:34330] "set" "numVisits" "8"
    1646485510.270934 [0 172.24.0.4:52082] "get" "numVisits"
    1646485510.286785 [0 172.24.0.4:52082] "set" "numVisits" "9"
    1646485510.469613 [0 172.24.0.2:34330] "get" "numVisits"
    1646485510.480849 [0 172.24.0.2:34330] "set" "numVisits" "10"
    1646485510.622615 [0 172.24.0.4:52082] "get" "numVisits"
    1646485510.632720 [0 172.24.0.4:52082] "set" "numVisits" "11"

    Further References

    - + \ No newline at end of file diff --git a/operate/observability/datadog/index.html b/operate/observability/datadog/index.html index 7650a8bdc9..80fb0ec274 100644 --- a/operate/observability/datadog/index.html +++ b/operate/observability/datadog/index.html @@ -4,7 +4,7 @@ Redis Enterprise Observability with Datadog | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Redis Enterprise Observability with Datadog


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis
    Profile picture for Christian Mague
    Author:
    Christian Mague, Former Principal Field Engineer at Redis

    Datadog

    DevOps and SRE practitioners are already keenly aware of the importance of system reliability, as it’s one of the shared goals in every high-performing organization. Defining clear reliability targets based on solid data is crucial for productive collaboration between developers and SREs. This need spans the entire infrastructure, from the application to backend database services.

    Service Level Objectives (SLOs) provide a powerful interface for all teams to set clear performance and reliability goals based on Service Level Indicators (SLIs) or data points. A good model is to think of the SLIs as the data and the SLO as the information one uses to make critical decisions.

    Further reading: https://cloud.google.com/blog/products/devops-sre/sre-fundamentals-slis-slas-and-slos

    Redis

    Redis is a popular multi-model NoSQL database server that provides in-memory data access speeds for search, messaging, streaming, caching, and graph—amongst other capabilities. Highly performant sites such as Twitter, Snapchat, Freshworks, GitHub, Docker, Pinterest, and Stack Overflow all look to Redis to move data in real time.

    Redis SLOs can be broken down into three main categories:

    Category   | Definition                                                                    | Example SLO                                                          | Example SLI
    Throughput | Number of operations being pushed through the service in a given time period | System should be capable of performing 200M operations per second   | redisenterprise.total_req
    Latency    | Elapsed time it takes for an operation                                        | Average write latency should not exceed 1 millisecond               | redisenterprise.avg_latency
    Capacity   | Memory/storage/network limits of the underlying data source                  | Database should have 20% memory overhead available to handle bursts | redisenterprise.used_memory_percent

    Why Datadog?

    Running your own performance data platform is time consuming and difficult. Datadog provides an excellent platform with an open source agent to collect metrics and allows them to be displayed easily and alerted upon when necessary.

    Datadog allows you to:

    • Collect metrics from various infrastructure components out of the box
    • Display that data in easy to read dashboards
    • Monitor performance metrics and alert accordingly
    • Correlate log entries with metrics to quickly drill down to root causes

    Key Performance Indicators

    1. Latency

    Definition

    redisenterprise.avg_latency (unit: microseconds)

    This is the average amount of time that a request takes to return from the time that it first hits the Redis Enterprise proxy until the response is returned. It does not include the full time from the remote client’s perspective.

    Characteristics

    Since Redis is popular due to performance, generally you would expect most operations to return in single digit milliseconds. Tune any alerts to match your SLA. It’s generally recommended that you also measure Redis operation latency at the client side to make it easier to determine if a server slow down or an increase in network latency is the culprit in any performance issues.

    Possible Causes

    Cause                          | Factors
    Spike in requests              | Check both the Network Traffic and Operations Per Second metrics to determine if there is a corresponding increase
    Slow-running queries           | Check the slow log in the Redis Enterprise UI for the database
    Insufficient compute resources | Check to see if the CPU Usage, Memory Usage Percentage, or Evictions are increasing

    Remediation

    Action              | Method
    Increase resources  | The database can be scaled up online by going to the Web UI and enabling clustering on the database. In extreme cases, more nodes can be added to the cluster and resources rebalanced
    Inefficient queries | Redis allows you to view a slow log with a tunable threshold. It can be viewed either in the Redis Enterprise UI or by running: redis-cli -h HOST -p PORT -a PASSWORD SLOWLOG GET 100

    2. Memory Usage Percentage

    Definition

    redisenterprise.memory_usage_percent (unit: percentage)

    This is the percentage of used memory over the memory limit set for the database.

    Characteristics

    In Redis Enterprise, all databases have a maximum memory limit set to ensure isolation in a multi-tenant environment. This is also highly recommended when running open source Redis. Be aware that Redis does not immediately free memory upon key deletion. Depending on the size of the database, generally between 80-95% is a safe threshold.

    Possible Causes

    Cause                        | Factors
    Possible spike in activity   | Check both the Network Traffic and Operations Per Second metrics to determine if there is a corresponding increase
    Database sized incorrectly   | View the Memory Usage raw bytes over time to see if a usage pattern has changed
    Incorrect retention policies | Check to see if keys are being Evicted or Expired

    Remediation

    Action             | Method
    Increase resources | The database memory limit can be raised online with no downtime through either the Redis Enterprise UI or the API
    Retention Policy   | In a caching use case, setting a TTL for unused data to expire is often helpful. In addition, Eviction policies can be set; however, these may often not be able to keep up in extremely high-throughput environments with very tight resource constraints

    3. Cache Hit Rate

    Definition
    redisenterprise.cache_hit_rate (unit: percent)

    This is the percentage of time that Redis is accessing a key that already exists.

    Characteristics

    This metric is useful only in the caching use case and should be ignored for all other use cases. There are tradeoffs between the freshness of the data in the cache and efficacy of the cache mitigating traffic to any backend data service. These tradeoffs should be considered carefully when determining the threshold for alerting.

    Possible Causes

    This is highly specific to the application caching with no general rules that are applicable in the majority of cases.

    Remediation

    Note that Redis commands return information on whether or not a key or field already exists. For example, the HSET command returns the number of fields in the hash that were added.

    4. Evictions

    Definition
    redisenterprise.evicted_objects (unit: count)

    This is the count of items that have been evicted from the database.

    Characteristics

    Eviction occurs when the database is close to capacity. In this condition, the eviction policy starts to take effect. While Expiration is fairly common in the caching use case, Eviction from the cache should generally be a matter of concern. At very high throughput and very restricted resource use cases, sometimes the eviction sweeps cannot keep up with memory pressure. Relying on Eviction as a memory management technique should be considered carefully.

    Possible Causes

    See Memory Usage Percentage Possible Causes

    Remediation

    See Memory Usage Percentage Remediation

    Secondary Indicators

    1. Network Traffic

    Definition
    redisenterprise.ingress_bytes/redisenterprise.egress_bytes (unit: bytes)

    Counters for the network traffic coming into the database and out from the database.

    Characteristics

    While these two metrics will not help you pinpoint a root cause, network traffic is an excellent leading indicator of trouble. Changes in network traffic patterns indicate corresponding changes in database behavior and further investigation is usually warranted.

    2. Connection Count

    Definition
    redisenterprise.conns (unit: count)

    The number of current client connections to the database.

    Characteristics

    This metric should be monitored with both a minimum and maximum number of connections. The minimum number of connections not being met is an excellent indicator of either networking or application configuration errors. The maximum number of connections being exceeded may indicate a need to tune the database.

    Possible Causes

    Cause                        | Factors
    Minimum clients not met      | Incorrect client configuration, network firewall, or network issues
    Maximum connections exceeded | Client library is not releasing connections, or an increase in the number of clients

    Remediation

    Action                | Method
    Clients misconfigured | Confirm client configurations
    Networking issue      | Issue the PING command from a client node, or TELNET to the endpoint
    Too many connections  | Be sure that you are using pooling on your client library and that your pools are sized accordingly
    Too many connections  | Using rladmin, run: tune proxy PROXY_NUMBER threads VALUE

    You can access the complete list of metrics here.

    Getting Started

    Follow the steps below to set up the Datadog agent to monitor your Redis Enterprise cluster, as well as database metrics:

    Quickstart Guide:

    Prerequisites:

    • Follow this link to setup your Redis Enterprise cluster and database
    • Set up a read-only user account by logging into your Redis Enterprise instance and visiting the “Access Control” section

    alt_text

    • Add a new user account with Cluster View Permissions.

    alt_text

    Step 1. Set Up a Datadog Agent

    Before we jump into the installation, let’s look at the various modes that you can run the Datadog agent in:

    • External Monitor Mode
    • Localhost Mode

    External Monitor Mode

    alt_text

    In external monitor mode, a Datadog agent running outside of the cluster can monitor multiple Redis Enterprise clusters, as shown in the diagram above.

    Localhost Mode

    Using localhost mode, the integration can be installed on every node of a Redis Enterprise cluster. This allows the user to correlate OS level metrics with Redis-specific metrics for faster root cause analysis. Only the Redis Enterprise cluster leader will submit metrics and events to Datadog. In the event of a migration of the cluster leader, the new cluster leader will begin to submit data to Datadog.

    alt_text

    For this demo, we will be leveraging localhost mode as we just have two nodes to configure.

    Step 2. Launch the Datadog agent on the Master node

    Pick up your preferred OS distribution and install the Datadog agent

    alt_text

    Run the following command to install the integration wheel with the Agent, replacing <INTEGRATION_VERSION> with 1.0.1:

     datadog-agent integration install -t datadog-redisenterprise==<INTEGRATION_VERSION>

    Step 3. Configuring Datadog configuration file

    Copy the sample configuration and update the required sections to collect data from your Redis Enterprise cluster:

    For Localhost Mode

    The following minimal configuration should be added to the Enterprise Master node.

    sudo vim /etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml

    #################################################################
    # Base configuration
    init_config:

    instances:
      - host: localhost
        username: user@example.com
        password: secretPassword
        port: 9443

    Similarly, you need to edit the configuration file on the Enterprise Follower node to add the following:

    sudo vim /etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml

    #################################################################
    # Base configuration
    init_config:

    instances:
      - host: localhost
        username: user@example.com
        password: secretPassword
        port: 9443

    For External Monitor Mode

    The following configuration should be added to the Monitor node

    # Base configuration
    init_config:

    instances:
      - host: cluster1.fqdn
        username: user@example.com
        password: secretPassword
        port: 9443

      - host: cluster2.fqdn
        username: user@example.com
        password: secretPassword
        port: 9443

    Step 4. Restart the Datadog Agent service

     sudo service datadog-agent restart

    Step 5. Viewing the Datadog Dashboard UI

    Find the Redis Enterprise Integration under the Integration Menu:

    alt_text

    Displaying the host reporting data to Datadog:

    alt_text

    Listing the Redis Enterprise dashboards:

    alt_text

    Host details under Datadog Infrastructure list:

    alt_text

    Datadog dashboard displaying host metrics of the 1st host (CPU, Memory Usage, Load Average etc):

    alt_text

    Datadog dashboard displaying host metrics of the 2nd host:

    alt_text

    Step 6. Verifying the Datadog Agent Status

    Running the datadog-agent status command shows that the Redis Enterprise integration is working correctly:

    sudo datadog-agent status

    redisenterprise (1.0.1)
    -----------------------
      Instance ID: redisenterprise:ef4cd60aadac5744 [OK]
      Configuration Source: file:/etc/datadog-agent/conf.d/redisenterprise.d/conf.yaml
      Total Runs: 2
      Metric Samples: Last Run: 0, Total: 0
      Events: Last Run: 0, Total: 0
      Service Checks: Last Run: 0, Total: 0
      Average Execution Time : 46ms
      Last Execution Date : 2021-10-28 17:27:10 UTC (1635442030000)
      Last Successful Execution Date : 2021-10-28 17:27:10 UTC (1635442030000)

    Redis Enterprise Cluster Top View

    alt_text

    Let’s run a memory benchmark tool called memtier_benchmark to simulate an arbitrary number of clients connecting at the same time and performing actions on the server, measuring how long it takes for the requests to be completed.

     memtier_benchmark --server localhost -p 19701 -a password
    [RUN #1] Preparing benchmark client...
    [RUN #1] Launching threads now...

    alt_text

    The next command instructs memtier_benchmark to connect to your Redis Enterprise database and generate load by doing the following:

    • Write objects only, no reads.
    • Each object is 500 bytes.
    • Each object has random data in the value.
    • Each key has a random pattern, then a colon, followed by a random pattern.

    Run this command until it fills up your database to where you want it for testing. The easiest way to check is on the database metrics page.

     memtier_benchmark --server localhost -p 19701 -a Oracle9ias12# -R -n allkeys -d 500 --key-pattern=P:P --ratio=1:0
    setting requests to 50001
    [RUN #1] Preparing benchmark client...
    [RUN #1] Launching threads now...

    alt_text

    The Datadog Events Stream shows an instant view of your infrastructure and services events to help you troubleshoot issues happening now or in the past. The event stream displays the most recent events generated by your infrastructure and the associated monitors, as shown in the diagram below.

    alt_text

    References:

    - + \ No newline at end of file diff --git a/operate/observability/redisdatasource/index.html b/operate/observability/redisdatasource/index.html index a797c879ec..2e3d581b89 100644 --- a/operate/observability/redisdatasource/index.html +++ b/operate/observability/redisdatasource/index.html @@ -4,7 +4,7 @@ How to add Redis as a datasource in Grafana and build customize dashboards for Analytics | The Home of Redis Developers - + @@ -16,7 +16,7 @@ In our case, we will be using redis-datasource.

     docker run -d -p 3000:3000 --name=grafana -e "GF_INSTALL_PLUGINS=redis-datasource" grafana/grafana

    Step 3. Accessing the Grafana dashboard

    Open http://IP:3000 to access Grafana (Grafana serves plain HTTP by default). The default username/password is admin/admin.

    grafana

    Step 4. Click "Configuration"

    grafana

    Step 5. Add Redis as a Data Source

    grafana

    Step 6. Select "Redis" as data source type

    grafana

    Step 7. Add Redis Database name, Endpoint URL and password

    We'll assume that you already have a Redis server up and running in your infrastructure. You can also leverage Redis Enterprise Cloud, as demonstrated below.

    grafana

    Step 8. Click "Import" under Dashboard

    grafana

    Step 9. Access the Redis datasource Dashboard

    grafana

    Supported commands

    The data source supports various Redis commands using custom components and provides a unified interface to query any command.

    Query

    Further References

    - + \ No newline at end of file diff --git a/operate/orchestration/docker/images/index.html b/operate/orchestration/docker/images/index.html index 8b19c7b32b..fb452250ac 100644 --- a/operate/orchestration/docker/images/index.html +++ b/operate/orchestration/docker/images/index.html @@ -4,7 +4,7 @@ List of Images | The Home of Redis Developers - + @@ -12,7 +12,7 @@
    - + \ No newline at end of file diff --git a/operate/orchestration/docker/index.html b/operate/orchestration/docker/index.html index 89e6685e71..343c683812 100644 --- a/operate/orchestration/docker/index.html +++ b/operate/orchestration/docker/index.html @@ -4,7 +4,7 @@ How to Deploy and Run Redis in a Docker container | The Home of Redis Developers - + @@ -14,7 +14,7 @@

    How to Deploy and Run Redis in a Docker container


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Pre-requisites

    Ensure that Docker is installed in your system.

    If you're new, refer to Docker's installation guide to install Docker on Mac.

    To pull and start the Redis Enterprise Software Docker container, run this docker run command in the terminal or command-line for your operating system.

    note

    On Windows, make sure Docker is configured to run Linux-based containers.

    docker run -d --cap-add sys_resource --name rp -p 8443:8443 -p 9443:9443 -p 12000:12000 redislabs/redis
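
    Before opening the console, you can confirm that the container is up with a quick sanity check (output omitted; the STATUS column should read "Up" with the three mapped ports listed):

    docker ps --filter name=rp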

    In the web browser on the host machine, go to https://localhost:8443 to see the Redis Enterprise Software web console.

    Step 1: Click on “Setup”

    Click Setup to start the node configuration steps.

    Setting up nodes

    Step 2: Enter your preferred FQDN

    In the Node Configuration settings, enter a cluster FQDN such as demo.redis.com. Then click the Next button.

    Redis Enterprise Setup

    Enter your license key, if you have one. If not, click the Next button to use the trial version.

    Step 3: Enter the admin credentials

    Enter an email and password for the admin account for the web console.

    Login credentials

    These credentials are also used for connections to the REST API. Click OK to confirm that you are aware of the replacement of the HTTPS SSL/TLS certificate on the node, and proceed through the browser warning.

    Step 4: Create a Database:

    Select “Redis database” and the “single region” deployment, and click Next.

    Creating a database

    Enter a database name such as demodb and click Activate to create your database.

    Creating a database

    You now have a Redis database!

    Step 5: Connecting using redis-cli

    After you create the Redis database, you are ready to store data in it. redis-cli is a simple built-in command-line tool for interacting with Redis databases. Run redis-cli, located in the /opt/redislabs/bin directory, to connect to port 12000 and store and retrieve the value of a key in the database:

    $ docker exec -it rp bash
    redislabs@fd8dca50f905:/opt$
    /opt/redislabs/bin/redis-cli -p 12000
    127.0.0.1:12000> auth <enter password>
    OK
    127.0.0.1:12000> set key1 123
    OK
    127.0.0.1:12000> get key1
    "123"

    Next Steps

    Redis Launchpad
    - + \ No newline at end of file diff --git a/operate/orchestration/index.html b/operate/orchestration/index.html index 19bf0608de..c13b8656ec 100644 --- a/operate/orchestration/index.html +++ b/operate/orchestration/index.html @@ -4,7 +4,7 @@ Orchestration | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Orchestration

    The following links show you the various ways to connect your containerized workloads to Redis.

    How to Deploy and Run Redis in a Docker container
    Learn how to build a Node.js based app and run it using Nginx, Docker and Redis
    Create Redis database on Kubernetes
    Learn about Kubernetes Operator and its benefits
    - + \ No newline at end of file diff --git a/operate/orchestration/kubernetes-operator/index.html b/operate/orchestration/kubernetes-operator/index.html index a513cf37de..9456a56386 100644 --- a/operate/orchestration/kubernetes-operator/index.html +++ b/operate/orchestration/kubernetes-operator/index.html @@ -4,7 +4,7 @@ Kubernetes Operator: What It Is and Why You Should Really Care About It | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Kubernetes Operator: What It Is and Why You Should Really Care About It


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    My Image

    Kubernetes is popular due to its capability to deploy new apps at a faster pace. Thanks to "Infrastructure as data" (specifically, YAML), today you can express all your Kubernetes resources such as Pods, Deployments, Services, Volumes, etc., in a YAML file. These default objects make it much easier for DevOps and SRE engineers to fully express their workloads without the need to learn how to write code in a programming language like Python, Java, or Ruby.

    Kubernetes is designed for automation. Out of the box, you get lots of built-in automation from the core of Kubernetes. It can speed up your development process by making easy, automated deployments, updates (rolling update), and by managing your apps and services with almost zero downtime. However, Kubernetes can’t automate the process natively for stateful applications. For example, say you have a stateful workload, such as a database application, running on several nodes. If a majority of nodes go down, you’ll need to reload the database from a specific snapshot following specific steps. Using existing default objects, types, and controllers in Kubernetes, this would be impossible to achieve.

    Think of scaling nodes up, or upgrading to a new version, or disaster recovery for your stateful application — these kinds of operations often need very specific steps, and typically require manual intervention. Kubernetes cannot know all about every stateful, complex, clustered application. Kubernetes, on its own, does not know the configuration values for, say, a Redis database cluster, with its arranged memberships and stateful, persistent storage. Additionally, scaling stateful applications in Kubernetes is not an easy task and requires manual intervention.

    Stateful vs Stateless Applications

    Let’s try to understand the difference between stateful and stateless applications with a simple example. Consider a Kubernetes cluster running a simple web application (without any operator). The YAML file below allows you to create two replicas of NGINX (a stateless application).

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-deployment
      namespace: web
    spec:
      selector:
        matchLabels:
          app: nginx
      replicas: 2
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
            - name: nginx
              image: nginx:1.14.2
              ports:
                - containerPort: 80

    In the example above, a Deployment object named nginx-deployment (the .metadata.name field) is created under the namespace “web” (the .metadata.namespace field). It creates two replicated Pods, indicated by the .spec.replicas field. The .spec.selector field defines how the Deployment finds which Pods to manage; in this case, you select a label that is defined in the Pod template (app: nginx). The template field contains the following subfields: the Pods are labeled app: nginx using the .metadata.labels field, and the Pod template's specification indicates that the Pods run one container, nginx, which runs the nginx Docker Hub image at version 1.14.2.

    Run the command below to create the Deployment resource:

    kubectl create -f nginx-dep.yaml

    Let us verify if the Deployment was created successfully by running the following command:

     kubectl get deployments
    NAME READY UP-TO-DATE AVAILABLE AGE
    nginx-deployment 2/2 2 2 63s

    The example above shows the name of the Deployment in the namespace. It also displays how many replicas of the application are available to your users. You can also see that the number of desired replicas that have been updated to achieve the desired state is 2.

    alt_text

    You can run the kubectl describe command to get detailed information of deployment resources. To show details of a specific resource or group of resources:

    kubectl describe deploy
    Name:                   nginx-deployment
    Namespace:              web
    CreationTimestamp:      Mon, 30 Dec 2019 07:10:33 +0000
    Labels:                 <none>
    Annotations:            deployment.kubernetes.io/revision: 1
    Selector:               app=nginx
    Replicas:               2 desired | 2 updated | 2 total | 0 available | 2 unavailable
    StrategyType:           RollingUpdate
    MinReadySeconds:        0
    RollingUpdateStrategy:  25% max unavailable, 25% max surge
    Pod Template:
      Labels:  app=nginx
      Containers:
       nginx:
        Image:        nginx:1.14.2
        Port:         80/TCP
        Host Port:    0/TCP
        Environment:  <none>
        Mounts:       <none>
      Volumes:        <none>
    Conditions:
      Type           Status  Reason
      ----           ------  ------
      Available      False   MinimumReplicasUnavailable
      Progressing    True    ReplicaSetUpdated
    OldReplicaSets:  <none>
    NewReplicaSet:   nginx-deployment-6dd86d77d (2/2 replicas created)
    Events:
      Type    Reason             Age   From                   Message
      ----    ------             ----  ----                   -------
      Normal  ScalingReplicaSet  90s   deployment-controller  Scaled up replica set nginx-deployment-6dd86d77d to 2

    A Deployment is responsible for keeping a set of Pods running, but it’s equally important to expose an interface to these Pods so that the other external processes can access them. That’s where the Service resource comes in. The Service resource lets you expose an application running in Pods to be reachable from outside your cluster. Let us create a Service resource definition as shown below:

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service
    spec:
      selector:
        app: nginx
      ports:
        - port: 80
          targetPort: 80
      type: LoadBalancer

    The above YAML specification creates a new Service object named "nginx-service," which targets TCP port 80 on any Pod with the app=nginx label.

     kubectl get svc -n web
    NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
    nginx-service LoadBalancer 10.107.174.108 localhost 80:31596/TCP 46s

    alt_text

    Let’s scale the Deployment to 4 replicas. We are going to use the kubectl scale command, followed by the deployment type, name, and desired number of instances. The output is similar to this:

    kubectl scale deployments/nginx-deployment --replicas=4
    deployment.extensions/nginx-deployment scaled

    The change was applied, and we have 4 instances of the application available. Next, let’s check if the number of Pods changed. There should now be 4 Pods running in the cluster (as shown in the diagram below).

    alt_text

     kubectl get deployments
    NAME READY UP-TO-DATE AVAILABLE AGE
    nginx-deployment 4/4 4 4 4m

    There are 4 Pods, with different IP addresses. The change was registered in the Deployment events log.

    kubectl get pods -o wide
    NAME                               READY   STATUS    RESTARTS   AGE     IP           NODE             NOMINATED NODE   READINESS GATES
    nginx-deployment-6dd86d77d-b4v7k   1/1     Running   0          4m32s   10.1.0.237   docker-desktop   <none>           <none>
    nginx-deployment-6dd86d77d-bnc5m   1/1     Running   0          4m32s   10.1.0.236   docker-desktop   <none>           <none>
    nginx-deployment-6dd86d77d-bs6jr   1/1     Running   0          86s     10.1.0.239   docker-desktop   <none>           <none>
    nginx-deployment-6dd86d77d-wbdzv   1/1     Running   0          86s     10.1.0.238   docker-desktop   <none>           <none>

    Deleting one of the web server Pods triggers work in the control plane to restore the desired state of four replicas. Kubernetes starts a new Pod to replace the deleted one. In this excerpt, the replacement Pod shows a STATUS of ContainerCreating:

     kubectl delete pod nginx-deployment-6dd86d77d-b4v7k
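
    An illustrative kubectl get pods immediately afterwards (the replacement Pod's name suffix here is hypothetical; yours will differ):

    kubectl get pods
    NAME                               READY   STATUS              RESTARTS   AGE
    nginx-deployment-6dd86d77d-bnc5m   1/1     Running             0          5m
    nginx-deployment-6dd86d77d-bs6jr   1/1     Running             0          2m
    nginx-deployment-6dd86d77d-wbdzv   1/1     Running             0          2m
    nginx-deployment-6dd86d77d-xk9qp   0/1     ContainerCreating   0          2s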

    You will notice that the Nginx static web server is interchangeable with any other replica, or with a new Pod that replaces one of the replicas. It doesn’t store data or maintain state in any way. Kubernetes doesn’t need to make any special arrangements to replace a failed Pod, or to scale the application by adding or removing replicas of the server. Now you might be thinking, what if you want to store the state of the application? Great question.

    Scaling stateful application is hard

    Scaling stateless applications in Kubernetes is easy, but it’s not the same for stateful applications. Stateful applications require manual intervention: bringing Pods up and down is not that simple. Each Pod has an identity and some data attached to it, so removing a Pod means losing its data and disrupting the system.

    alt_text

    Consider a Kubernetes cluster with 6 worker Nodes hosting an Nginx web application connected to a persistent volume, as shown above. Here is a snippet of the StatefulSet YAML file:


    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: web
    spec:
      serviceName: "nginx"
      replicas: 2
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
            - name: nginx
              image: nginx:1.14.2
              ports:
                - containerPort: 80
                  name: web
              volumeMounts:
                - name: www
                  mountPath: /usr/share/nginx/html
      volumeClaimTemplates:
        - metadata:
            name: www
          spec:
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: 1Gi

    Kubernetes makes physical storage devices available to your cluster in the form of objects called Persistent Volumes. Each of these Persistent Volumes is consumed by a Kubernetes Pod by issuing a PersistentVolumeClaim object, also known as a PVC. A PVC object lets Pods use storage from Persistent Volumes.

    Imagine a scenario in which we want to downscale a cluster from 5 Nodes to 3 Nodes. Suddenly removing 2 Nodes at once is a potentially destructive operation. This might lead to the loss of all copies of the data. A better way to handle Node removal would be to first migrate data from the Node to be removed to other Nodes in the system before performing the actual Pod deletion.

    It is important to note that the StatefulSet controller is necessarily generic and cannot possibly know about every possible way to manage data migration and replication. In practice, StatefulSets are rarely enough to handle complex, distributed stateful workload systems in production environments.

    Now the question is, how to solve this problem? Enter Operators. Operators were developed to handle the sophisticated, stateful applications that the default Kubernetes controllers aren’t able to handle. While Kubernetes controllers like StatefulSets are ideal for deploying, maintaining, and scaling simple stateless applications, they are not equipped to handle access to stateful resources, or to upgrade, resize, and backup of more elaborate clustered applications such as databases. A Kubernetes Operator fills in the gaps between the capabilities and automation provided by Kubernetes and how your software uses Kubernetes for automation of tasks relevant to your software.

    An Operator is basically an application-specific controller that can help you manage a Kubernetes application. It is a way to package, run, and maintain a Kubernetes application. It is designed to extend the capabilities of Kubernetes, and also simplify application management. This is especially useful for stateful applications, which include persistent storage and other elements external to the application, and may require extra work to manage and maintain.

    Functions of Kubernetes Operator

    A Kubernetes Operator uses the Kubernetes API to create, configure, and manage instances of complex stateful applications on behalf of a Kubernetes user. There is a public repository called OperatorHub.io that is designed to be the public registry for finding Kubernetes Operator backend services. With Operator Hub, developers can easily create an application based on an operator without going through the complexity of crafting an operator from scratch.

    alt_text

    Below are a few examples of popular Kubernetes Operators and their functions and capabilities.

    Kubernetes Operators:

    • Helps you deploy an application on demand (for example, the Argo CD operator; Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes that helps with easy installation and configuration on demand)
    • Helps you install applications with the required configurations and number of application instances
    • Allows you to take and restore backups of the application state (for example, Velero operator manages disaster recovery, backup, and restoration of cluster components such as pv, pvc, deployments, etc., to aid in disaster recovery)
    • Handles the upgrades of the application code plus the changes, such as database schema (for example, Flux is a continuous delivery solution for Kubernetes that allows automating updates to configuration when there is new code to deploy)
    • Can manage a cluster of database servers (for example, MariaDB operator creates MariaDB server and database easily by defining simple custom resource)
    • Can install a database cluster of a declared software version and number of members
    • Scale applications in or out
    • Continues to monitor an application as it runs (for example, Prometheus Operator simplifies the deployment and configuration of Prometheus, Alertmanager, and related monitoring components)
    • Initiate upgrades, automated backups, and failure recovery, simulating failure in all or part of your cluster to test its resilience
    • Allows you to publish a service to applications that don’t support Kubernetes APIs to discover them

    How does an Operator work?

    Operators work by extending the Kubernetes control plane and API. Operators allows you to define a Custom Controller that watches your application and performs custom tasks based on its state. The application you want to watch is usually defined in Kubernetes as a new object: a Custom Resource (CR) that has its own YAML spec and object type that is well understood by the API server. That way, you can define any specific criteria in the custom spec to watch out for, and reconcile the instance when it doesn’t match the spec. The way an Operator’s controller reconciles against a spec is very similar to native Kubernetes controllers, though it is using mostly custom components.

    What is the Redis Enterprise Operator?

    Redis has created an Operator that deploys and manages the lifecycle of a Redis Enterprise Cluster. The Redis Enterprise Operator is the fastest, most efficient way to deploy and maintain a Redis Enterprise cluster in Kubernetes. The Operator creates, configures, and manages Redis Enterprise deployments from a single Kubernetes control plane. This means that you can manage Redis Enterprise instances on Kubernetes just by creating native objects, such as a Deployment, ReplicaSet, StatefulSet, etc. Operators allow full control over the Redis Enterprise cluster lifecycle.

    The Redis Enterprise Operator acts as a custom controller for the custom resource Redis Enterprise Cluster, or “REC”, which is defined through a Kubernetes CRD (custom resource definition) and deployed with a YAML file. The Redis Enterprise Operator functions as the logical “glue” between the Kubernetes infrastructure and the Redis Enterprise cluster.

    How does the Redis Enterprise Operator work?

    alt_text

    The Redis Enterprise Operator supports two Custom Resource Definitions (CRDs):

    • Redis Enterprise Cluster (REC): An API to create Redis Enterprise clusters. Note that only one cluster is supported per Operator deployment.
    • Redis Enterprise Database (REDB): An API to create Redis databases running on the Redis Enterprise cluster. Note that the Redis Enterprise Operator is namespaced. High-level architecture and overview of the solution can be found HERE.

    This is how it works:

    1. First, the Redis Enterprise Cluster custom resource (“CR” for short) is read and validated by the operator against the cluster specification.
    2. Second, the cluster StatefulSet, service rigger, cluster admin secrets, and RS/UI services are created.
    3. A Redis Enterprise Database CR is read and validated by the operator.
    4. The database is created on the cluster and the database access credentials are stored in a Kubernetes secret object.
    5. The service rigger discovers the new database and configures the Kubernetes service for the database.
    6. An application workload uses the database secret and service for access to data.

    Example of Operator automation

    Consider the YAML file below:

    apiVersion: app.redislabs.com/v1
    kind: RedisEnterpriseCluster
    metadata:
      name: rec
    spec:
      # Add fields here
      nodes: 3

    If you change the number of nodes to 5, the Operator talks to StatefulSets, and changes the number of replicas from 3 to 5. Once that happens, Kubernetes will take over and bootstrap new Nodes one at a time, deploying Pods accordingly. As each becomes ready, the new Nodes join the cluster and become available to Redis Enterprise master Nodes.
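
    For example, one hedged way to apply that change from the CLI, rather than editing the YAML file, is a merge patch against the REC object:

    kubectl patch rec rec --type merge --patch '{"spec":{"nodes":5}}'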

    alt_text

    apiVersion: app.redislabs.com/v1
    kind: RedisEnterpriseDatabase
    metadata:
      name: redis-enterprise-database
    spec:
      redisEnterpriseCluster:
        name: redis-enterprise
      memory: 2G

    alt_text

    In order to create a database, the Operator reads the REDB custom resource and talks to the cluster REST API to create the database. The service rigger then discovers the new database and creates a Kubernetes service endpoint for it, making the database available to applications.

    In the next tutorial, you will learn how to get started with the Redis Enterprise Kubernetes Operator from scratch, including how to perform non-trivial tasks such as backup, restore, horizontal scaling, and much more. Stay tuned!

    References

    - + \ No newline at end of file diff --git a/operate/orchestration/kubernetes/kubernetes-gke/index.html b/operate/orchestration/kubernetes/kubernetes-gke/index.html index 7f973e892e..e228757bbd 100644 --- a/operate/orchestration/kubernetes/kubernetes-gke/index.html +++ b/operate/orchestration/kubernetes/kubernetes-gke/index.html @@ -4,7 +4,7 @@ Create a Redis database on Google Kubernetes Engine | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Create a Redis database on Google Kubernetes Engine


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis

    Step 1. Prerequisites

    Step 2. Ensure that gcloud is installed on your local Linux system:

    $ gcloud -v
    Google Cloud SDK 320.0.0
    alpha 2020.12.04
    app-engine-go 1.9.71
    app-engine-java 1.9.84
    app-engine-python 1.9.91
    app-engine-python-extras 1.9.91

    Step 3. Create a 5 Node GKE cluster:

    $ gcloud container clusters create testredis  --subnetwork default --num-nodes 5 --machine-type e2-standard-8 --enable-basic-auth --region us-east1

    Step 4. Create a new namespace

    [node1 kubelabs]$ kubectl create namespace demo
    namespace/demo created

    Step 5. Switch context to the newly created namespace

    $ kubectl config set-context --current --namespace=demo
    Context "kubernetes-admin@kubernetes" modified.

    Step 6. Deploy the operator bundle

    To deploy the default installation with kubectl, the following command will deploy a bundle of all the YAML declarations required for the operator. You can download the bundle YAML file via this link:

    $ kubectl apply -f bundle.yaml
    role.rbac.authorization.k8s.io/redis-enterprise-operator created
    rolebinding.rbac.authorization.k8s.io/redis-enterprise-operator created
    serviceaccount/redis-enterprise-operator created
    customresourcedefinition.apiextensions.k8s.io/redisenterpriseclusters.app.redislabs.com created
    deployment.apps/redis-enterprise-operator created
    customresourcedefinition.apiextensions.k8s.io/redisenterprisedatabases.app.redislabs.com created

    Step 7. Verifying the Deployment:

    Run the following command to verify that the redis-enterprise-operator deployment is running:

    kubectl get deployment
    NAME READY UP-TO-DATE AVAILABLE AGE
    redis-enterprise-operator 1/1 1 1 9m34s

    Step 8. Create a Redis Enterprise Cluster

    Create a Redis Enterprise Cluster (REC) using the default configuration, which is suitable for development type deployments and works in typical scenarios:

    $ kubectl apply -f crds/app_v1_redisenterprisecluster_cr.yaml

    redisenterprisecluster.app.redislabs.com/redis-enterprise created

    Step 9. Verifying the Redis Enterprise Cluster

    rec is a shortcut for RedisEnterpriseCluster. The cluster takes around 5-10 minutes to come up. Run the command below to check that the RedisEnterpriseCluster is up:

    $ kubectl get rec
    NAME AGE
    redis-enterprise 14s
    [node1 redis-enterprise-k8s-docs]$

    Step 10. Listing Kubernetes Resources

    $ kubectl get po,svc,deploy
    NAME                                                    READY   STATUS    RESTARTS   AGE
    pod/redis-enterprise-0                                  2/2     Running   0          6m42s
    pod/redis-enterprise-1                                  2/2     Running   0          4m34s
    pod/redis-enterprise-2                                  2/2     Running   0          2m18s
    pod/redis-enterprise-operator-58f8566fd7-5kcvz          1/1     Running   0          69m
    pod/redis-enterprise-services-rigger-5849b86c65-lwql9   1/1     Running   0          6m42s

    NAME                          TYPE           CLUSTER-IP     EXTERNAL-IP     PORT(S)                      AGE
    service/kubernetes            ClusterIP      10.3.240.1     <none>          443/TCP                      71m
    service/redis-enterprise      ClusterIP      None           <none>          9443/TCP,8001/TCP,8070/TCP   6m42s
    service/redis-enterprise-ui   LoadBalancer   10.3.246.252   35.196.117.24   8443:31473/TCP               6m42s

    NAME                                               READY   UP-TO-DATE   AVAILABLE   AGE
    deployment.apps/redis-enterprise-operator          1/1     1            1           69m
    deployment.apps/redis-enterprise-services-rigger   1/1     1            1           6m44s

    You can verify the Pods and list of services using the Google Cloud Dashboard UI:

    Redis Enterprise UI

    Step 11. Listing the Secrets

    kubectl get secrets redis-enterprise -o yaml | grep password | awk '{print $2}'
    bXVLeHRpblY=

    Step 12. Listing the Password

    echo bXVLeHRpblY= | base64 -d
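
    Steps 11 and 12 can also be combined into a single one-liner using kubectl's jsonpath output, a small convenience sketch:

    kubectl get secrets redis-enterprise -o jsonpath='{.data.password}' | base64 -d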

    Step 13. Creating a Database

    Open https://<EXTERNAL-IP>:8443 in the browser (use the EXTERNAL-IP of the redis-enterprise-ui service listed above, e.g. https://35.196.117.24:8443) to see the Redis Enterprise Software web console. Click on "Setup", add your preferred DNS and admin credentials, and proceed to create your first Redis database using Redis Enterprise.

    Next Steps

    Redis Launchpad
    - + \ No newline at end of file diff --git a/operate/provisioning/terraform/index.html b/operate/provisioning/terraform/index.html index df6e7a224e..62ecc672cd 100644 --- a/operate/provisioning/terraform/index.html +++ b/operate/provisioning/terraform/index.html @@ -4,7 +4,7 @@ How to Deploy and Manage Redis Databases on AWS Using Terraform | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    How to Deploy and Manage Redis Databases on AWS Using Terraform


    Profile picture for Ajeet Raina
    Author:
    Ajeet Raina, Former Developer Growth Manager at Redis
    Profile picture for Rahul Chauhan
    Author:
    Rahul Chauhan, Corporate Solution Architect at Redis

    terraform

    Development teams today are embracing more and more DevOps principles, such as continuous integration and continuous delivery (CI/CD). Therefore, the need to manage infrastructure-as-code (IaC) has become an essential capability for any cloud service. IaC tools allow you to manage infrastructure with configuration files rather than through a graphical user interface. IaC allows you to build, change, and manage your infrastructure in a safe, consistent, and repeatable way by defining resource configurations that you can version, reuse, and share.

    A leading tool in the IaC space is HashiCorp Terraform, which supports the major cloud providers and services with its providers-and-modules cloud infrastructure automation ecosystem for the provisioning, compliance, and management of any cloud, infrastructure, and service.

    What is Terraform?

    Terraform is an open source IaC software tool that provides a consistent CLI workflow to manage hundreds of cloud services. Terraform codifies cloud APIs into declarative configuration files, which can then be shared amongst team members, treated as code, edited, reviewed, and versioned. It enables you to safely and predictably create, change, and improve infrastructure.

    Capabilities of Terraform

    • Terraform is not just a configuration management tool. It also focuses on the higher-level abstraction of the data center and associated services, while allowing you to use configuration management tools on individual systems.
    • It supports multiple cloud providers, such as AWS, GCP, Azure, DigitalOcean, etc.
    • It provides a single unified syntax, instead of requiring operators to use independent and non-interoperable tools for each platform and service.
    • Manages both existing service providers and custom in-house solutions.
    • Terraform is easily portable to any other provider.
    • Provides immutable infrastructure where configuration changes smoothly.
    • Supports client-only architecture, so no need for additional configuration management on a server.
    • Terraform is very flexible, using a plugin-based model to support providers and provisioners, giving it the ability to support almost any service that exposes APIs.
    • It is not intended to give low-level programmatic access to providers, but instead provides a high-level syntax for describing how cloud resources and services should be created, provisioned, and combined.
    • It provides a simple, unified syntax, allowing almost any resource to be managed without learning new tooling.

    The HashiCorp Terraform Redis Enterprise Cloud provider

    Redis has developed a Terraform provider for Redis Enterprise Cloud. The HashiCorp Terraform Redis Enterprise Cloud provider allows customers to deploy and manage Redis Enterprise Cloud subscriptions, databases, and network peering easily as code, on any cloud provider. It is a plugin for Terraform that allows Redis Enterprise Cloud Flexible customers to manage the full life cycle of their subscriptions and related Redis databases.

    The Redis Enterprise Cloud provider is used to interact with the resources supported by Redis Enterprise Cloud. The provider needs to be configured with the proper credentials before it can be used. Use the navigation to the left to read about the available provider resources and data sources.

    rediscloud

Before we jump into the implementation, let us take a moment to better understand the Terraform configuration. A Terraform configuration is a complete document in the Terraform language that tells Terraform how to manage a given collection of infrastructure. A configuration can consist of multiple files and directories, and it is broken down into three main components:

    • Providers
    • Data sources
    • Resources

    Providers

A provider is the first resource that will need to be defined in any project under the Terraform configuration file. The provider gives you access to the API you will be interacting with to create resources. Once the provider has been configured and authenticated, a vast number of resources become available for creation. Terraform serves more than 100 cloud providers.

A provider defines resources and data for a particular infrastructure, such as AWS. As shown below, the terraform {} block contains Terraform settings, including the required providers Terraform will use to provision your infrastructure (for example, the rediscloud provider).

terraform {
  required_providers {
    rediscloud = {
      source  = "RedisLabs/rediscloud"
      version = "0.2.2"
    }
  }
}

The cloud_provider {} block, shown below, configures the specific cloud provider for the subscription. In the following example, it is AWS.

cloud_provider {
  provider         = "AWS"
  cloud_account_id = 1
  region {
    region                       = "us-east-1"
    networking_deployment_cidr   = "10.0.0.0/24"
    preferred_availability_zones = ["us-east-1a"]
  }
}

    Resources

    Resources are the most important element in the Terraform language. This is where you describe the piece of infrastructure to be created, and this can range from a compute instance to defining specific permissions and much more.

    As shown below, the resource {} block is used to define components of your infrastructure. A resource might be a physical or virtual component, such as EC2, or it could be a logical component, such as a Heroku application.

     resource "random_password" "passwords" {
    count = 2
    length = 20
    upper = true
    lower = true
    number = true
    }

The resource {} block has two strings before the block: the resource type and the resource name. The prefix of the type maps to the name of the provider. Together, the resource type ("random_password") and the resource name ("passwords") form a unique identifier for the resource. Terraform uses this ID to identify the resource.
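That identifier is also how you reference the resource's attributes elsewhere in the configuration. As a minimal sketch (this output block is illustrative and not part of the tutorial's configuration):

output "first_password" {
  value     = random_password.passwords[0].result
  sensitive = true
}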

    Data sources

    Data sources allow Terraform to use information defined outside of Terraform, defined by another separate Terraform configuration, or modified by functions. Each provider may offer data sources alongside its set of resource types. A data source is accessed via a special kind of resource known as a data resource, declared using a data block.

     data "rediscloud_payment_method" "card" {
    card_type = "Visa"
    last_four_numbers = "XXXX"
    }

    A data block requests that Terraform read from a given data source ("rediscloud_payment_method") and export the result under the given local name ("card"). The name is used to refer to this resource from elsewhere in the same Terraform module, but has no significance outside of the scope of a module.

Within the block body (between { and }) are query constraints defined by the data source. Most arguments in this section depend on the data source; in this example, card_type and last_four_numbers are both arguments defined specifically for the rediscloud_payment_method data source.

    Configure Redis Enterprise Cloud programmatic access

    In order to set up authentication with the Redis Enterprise Cloud provider, a programmatic API key must be generated for Redis Enterprise Cloud. The Redis Enterprise Cloud documentation contains the most up-to-date instructions for creating and managing your key(s) and IP access.

    tip

    Flexible and Annual Redis Enterprise Cloud subscriptions can leverage a RESTful API that permits operations against a variety of resources, including servers, services, and related infrastructure. The REST API is not supported for Fixed or Free subscriptions.

     provider "rediscloud" { } # Example resource configuration
    resource "rediscloud_subscription" "example" { # ... }

    Prerequisites:

• Install Terraform on macOS.
    • Create a free Redis Enterprise Cloud account.
    • Create your first subscription.
    • Enable API

Step 1: Install Terraform on macOS

Use Homebrew to install Terraform on macOS as shown below:

     brew install terraform
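You can confirm the installation by checking the version:

% terraform version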

    Step 2: Sign up for a free Redis Enterprise Cloud account

    Follow this tutorial to sign up for a free Redis Enterprise Cloud account.

    Redis Cloud

    Step 3: Enable Redis Enterprise Cloud API

    If you have a Flexible (or Annual) Redis Enterprise Cloud subscription, you can use a REST API to manage your subscription programmatically. The Redis Cloud REST API is available only to Flexible or Annual subscriptions. It is not supported for Fixed or Free subscriptions.

    For security reasons, the Redis Cloud API is disabled by default. To enable the API:

    • Sign in to your Redis Cloud subscription as an account owner.

    • From the menu, choose Access Management.

    • When the Access Management screen appears, select the API Keys tab.

    Terraform

    If a Copy button appears to the right of the API account key, the API is enabled. This button copies the account key to the clipboard.

    If you see an Enable API button, select it to enable the API and generate your API account key.

To authenticate REST API calls, you combine the API account key with an API user key.

    Terraform

    Step 4: Create a main.tf file

It’s time to create an empty “main.tf” file and start adding the provider, resource, and data source definitions as shown below:

terraform {
  required_providers {
    rediscloud = {
      source  = "RedisLabs/rediscloud"
      version = "0.2.2"
    }
  }
}

# Provide your credit card details
data "rediscloud_payment_method" "card" {
  card_type         = "Visa"
  last_four_numbers = "XXXX"
}

# Generates a random password for the database
resource "random_password" "passwords" {
  count   = 2
  length  = 20
  upper   = true
  lower   = true
  number  = true
  special = false
}

resource "rediscloud_subscription" "rahul-test-terraform" {
  name              = "rahul-test-terraform"
  payment_method_id = data.rediscloud_payment_method.card.id
  memory_storage    = "ram"

  cloud_provider {
    provider         = "AWS"
    cloud_account_id = 1
    region {
      region                       = "us-east-1"
      networking_deployment_cidr   = "10.0.0.0/24"
      preferred_availability_zones = ["us-east-1a"]
    }
  }

  database {
    name               = "db-json"
    protocol           = "redis"
    memory_limit_in_gb = 1
    replication        = true
    data_persistence   = "aof-every-1-second"

    module {
      name = "RedisJSON"
    }

    throughput_measurement_by    = "operations-per-second"
    throughput_measurement_value = 10000
    password                     = random_password.passwords[1].result
  }
}
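Before creating a plan, initialize the working directory so Terraform downloads the rediscloud and random providers declared above (a standard Terraform step implied between these two steps):

% terraform init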

    Step 5: Create an execution plan

    The Terraform plan command creates an execution plan, which lets you preview the changes that Terraform plans to make to your infrastructure. By default, when Terraform creates a plan, it reads the current state of any already existing remote objects to make sure that Terraform state is up to date. It then compares the current configuration to the prior state and then proposes a set of change actions that should make the remote object match the configuration.

     % terraform plan


    Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
    + create

    Terraform will perform the following actions:

    # random_password.passwords[0] will be created
    + resource "random_password" "passwords" {
    + id = (known after apply)
    + length = 20
    + lower = true
    + min_lower = 0
    + min_numeric = 0
    + min_special = 0
    + min_upper = 0
    + number = true
    + result = (sensitive value)
    + special = false
    + upper = true
    }

    # random_password.passwords[1] will be created
    + resource "random_password" "passwords" {
    + id = (known after apply)
    + length = 20
    + lower = true
    + min_lower = 0
    + min_numeric = 0
    + min_special = 0
    + min_upper = 0
    + number = true
    + result = (sensitive value)
    + special = false
    + upper = true
    }

    # rediscloud_subscription.rahul-test-terraform will be created
    + resource "rediscloud_subscription" "rahul-test-terraform" {
    + id = (known after apply)
    + memory_storage = "ram"
    + name = "rahul-test-terraform"
    + payment_method_id = "XXXX"
    + persistent_storage_encryption = true

    + cloud_provider {
    + cloud_account_id = "1"
    + provider = "AWS"

    + region {
    + multiple_availability_zones = false
    + networking_deployment_cidr = "10.0.0.0/24"
    + networks = (known after apply)
    + preferred_availability_zones = [
    + "us-east-1a",
    ]
    + region = "us-east-1"
    }
    }

    + database {
    # At least one attribute in this block is (or was) sensitive,
    # so its contents will not be displayed.
    }
    }

    Plan: 3 to add, 0 to change, 0 to destroy.

    ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────

    :::note

    You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now.

    :::
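If you want terraform apply to execute exactly the plan you reviewed, save the plan with -out and pass the saved file to apply:

% terraform plan -out=tfplan
% terraform apply tfplan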

    Step 6: Execute the action

    The Terraform apply command executes the actions proposed in a Terraform plan.

% terraform apply


    Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
    + create

    Terraform will perform the following actions:

    # random_password.passwords[0] will be created
    + resource "random_password" "passwords" {
    + id = (known after apply)
    + length = 20
    + lower = true
    + min_lower = 0
    + min_numeric = 0
    + min_special = 0
    + min_upper = 0
    + number = true
    + result = (sensitive value)
    + special = false
    + upper = true
    }

    # random_password.passwords[1] will be created
    + resource "random_password" "passwords" {
    + id = (known after apply)
    + length = 20
    + lower = true
    + min_lower = 0
    + min_numeric = 0
    + min_special = 0
    + min_upper = 0
    + number = true
    + result = (sensitive value)
    + special = false
    + upper = true
    }

    # rediscloud_subscription.rahul-test-terraform will be created
    + resource "rediscloud_subscription" "rahul-test-terraform" {
    + id = (known after apply)
    + memory_storage = "ram"
    + name = "rahul-test-terraform"
    + payment_method_id = "XXXX"
    + persistent_storage_encryption = true

    + cloud_provider {
    + cloud_account_id = "1"
    + provider = "AWS"

    + region {
    + multiple_availability_zones = false
    + networking_deployment_cidr = "10.0.0.0/24"
    + networks = (known after apply)
    + preferred_availability_zones = [
    + "us-east-1a",
    ]
    + region = "us-east-1"
    }
    }

    + database {
    # At least one attribute in this block is (or was) sensitive,
    # so its contents will not be displayed.
    }
    }

    Plan: 3 to add, 0 to change, 0 to destroy.

    Do you want to perform these actions?
    Terraform will perform the actions described above.
    Only 'yes' will be accepted to approve.

    Enter a value: yes

    random_password.passwords[0]: Creating...
    random_password.passwords[1]: Creating...
    random_password.passwords[1]: Creation complete after 0s [id=none]
    random_password.passwords[0]: Creation complete after 0s [id=none]
    rediscloud_subscription.rahul-test-terraform: Creating...
    rediscloud_subscription.rahul-test-terraform: Still creating... [10s elapsed]
    rediscloud_subscription.rahul-test-terraform: Still creating... [20s elapsed]
    rediscloud_subscription.rahul-test-terraform: Creation complete after 8m32s [id=1649277]

    Apply complete! Resources: 3 added, 0 changed, 0 destroyed.

    Step 7: Verify the database

You can now verify that the new database named “db-json” was created under the subscription.

Here is the complete configuration to deploy a Redis database with JSON and other Redis Stack features on AWS using Terraform:

terraform {
  required_providers {
    rediscloud = {
      source  = "RedisLabs/rediscloud"
      version = "0.2.2"
    }
  }
}

# Provide your credit card details
data "rediscloud_payment_method" "card" {
  card_type         = "Visa"
  last_four_numbers = "XXXX"
}

# Generates a random password for the database
resource "random_password" "passwords" {
  count   = 2
  length  = 20
  upper   = true
  lower   = true
  number  = true
  special = false
}

resource "rediscloud_subscription" "rahul-test-terraform" {
  name              = "rahul-test-terraform"
  payment_method_id = data.rediscloud_payment_method.card.id
  memory_storage    = "ram"

  cloud_provider {
    provider         = "AWS"
    cloud_account_id = 1
    region {
      region                       = "us-east-1"
      networking_deployment_cidr   = "10.0.0.0/24"
      preferred_availability_zones = ["us-east-1a"]
    }
  }

  database {
    name               = "db-json"
    protocol           = "redis"
    memory_limit_in_gb = 1
    replication        = true
    data_persistence   = "aof-every-1-second"

    module {
      name = "RedisJSON"
    }

    throughput_measurement_by    = "operations-per-second"
    throughput_measurement_value = 10000
    password                     = random_password.passwords[1].result
  }
}

    Step 8: Cleanup

The Terraform destroy command is a convenient way to destroy all remote objects managed by a particular Terraform configuration. While you will typically not want to destroy long-lived objects in a production environment, Terraform is sometimes used to manage ephemeral infrastructure for development purposes, in which case you can use terraform destroy to conveniently clean up all of those temporary objects once you are finished with your work.

    % terraform destroy
    random_password.passwords[0]: Refreshing state... [id=none]
    random_password.passwords[1]: Refreshing state... [id=none]
    rediscloud_subscription.rahul-test-terraform: Refreshing state... [id=1649277]

    Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
    - destroy

    Terraform will perform the following actions:

    # random_password.passwords[0] will be destroyed
    - resource "random_password" "passwords" {
    - id = "none" -> null
    - length = 20 -> null
    - lower = true -> null
    - min_lower = 0 -> null
    - min_numeric = 0 -> null
    - min_special = 0 -> null
    - min_upper = 0 -> null
    - number = true -> null
    - result = (sensitive value)
    - special = false -> null
    - upper = true -> null
    }

    # random_password.passwords[1] will be destroyed
    - resource "random_password" "passwords" {
    - id = "none" -> null
    - length = 20 -> null
    - lower = true -> null
    - min_lower = 0 -> null
    - min_numeric = 0 -> null
    - min_special = 0 -> null
    - min_upper = 0 -> null
    - number = true -> null
    - result = (sensitive value)
    - special = false -> null
    - upper = true -> null
    }

    # rediscloud_subscription.rahul-test-terraform will be destroyed
    - resource "rediscloud_subscription" "rahul-test-terraform" {
    - id = "1649277" -> null
    - memory_storage = "ram" -> null
    - name = "rahul-test-terraform" -> null
    - payment_method_id = "XXXX" -> null
    - persistent_storage_encryption = true -> null

    - cloud_provider {
    - cloud_account_id = "1" -> null
    - provider = "AWS" -> null

    - region {
    - multiple_availability_zones = false -> null
    - networking_deployment_cidr = "10.0.0.0/24" -> null
    - networks = [
    - {
    - networking_deployment_cidr = "10.0.0.0/24"
    - networking_subnet_id = "subnet-0055e8e3ee3ea796e"
    - networking_vpc_id = ""
    },
    ] -> null
    - preferred_availability_zones = [
    - "us-east-1a",
    ] -> null
    - region = "us-east-1" -> null
    }
    }

    - database {
    # At least one attribute in this block is (or was) sensitive,
    # so its contents will not be displayed.
    }
    }

    Plan: 0 to add, 0 to change, 3 to destroy.

    Do you really want to destroy all resources?
    Terraform will destroy all your managed infrastructure, as shown above.
    There is no undo. Only 'yes' will be accepted to confirm.

    Enter a value: yes

    rediscloud_subscription.rahul-test-terraform: Destroying... [id=1649277]

    rediscloud_subscription.rahul-test-terraform: Destruction complete after 1m34s
    random_password.passwords[0]: Destroying... [id=none]
    random_password.passwords[1]: Destroying... [id=none]
    random_password.passwords[0]: Destruction complete after 0s
    random_password.passwords[1]: Destruction complete after 0s

    Destroy complete! Resources: 3 destroyed.

    Further References:

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/course-wrap-up/index.html b/operate/redis-at-scale/course-wrap-up/index.html index cefa701ecd..7be326053d 100644 --- a/operate/redis-at-scale/course-wrap-up/index.html +++ b/operate/redis-at-scale/course-wrap-up/index.html @@ -4,7 +4,7 @@ Conclusion of Running Redis at Scale | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Conclusion of Running Redis at Scale


    Profile picture for Justin Castilla
    Author:
    Justin Castilla, Senior Developer Advocate at Redis
    Profile picture for Elena Kolevska
    Author:
    Elena Kolevska, Technical Enablement Manager, EMEA at Redis
    Profile picture for Kurt Moeller
    Author:
    Kurt Moeller, Technical Enablement Manager, US at Redis

    You've made it! Thanks again for trying out this course on the Redis Developer Site. We hope you've enjoyed it, and we hope it's provided you with the tools you need to successfully scale Redis with your applications.



If you would like to receive a certificate of completion for this course, head to Redis University to enroll in the full-format class, which includes homework for each course section. If you pass the course with a grade of sixty-five percent or greater, you'll be able to generate your certificate and post it to your LinkedIn profile.


Please consider subscribing to our YouTube channel to stay up to date with all of our latest tutorials, interviews, and general news.

And if you have any feedback or insights you want to share with the Redis University team, don't hesitate to leave a note in our online chat on our Discord server.

    Again, we're grateful you've taken the time to work through our course. Happy learning and see you next time!

    Best wishes,

    Elena, Kurt, and Justin

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/high-availability/basic-replication/index.html b/operate/redis-at-scale/high-availability/basic-replication/index.html index c780587b5a..31cda526b8 100644 --- a/operate/redis-at-scale/high-availability/basic-replication/index.html +++ b/operate/redis-at-scale/high-availability/basic-replication/index.html @@ -4,7 +4,7 @@ 3.1 Basic Replication | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    3.1 Basic Replication



    Replication in Redis follows a simple primary-replica model where the replication happens in one direction - from the primary to one or multiple replicas. Data is only written to the primary instance and replicas are kept in sync so that they’re exact copies of the primaries.

    To create a replica, you instantiate a Redis server instance with the configuration directive replicaof set to the address and port of the primary instance. Once the replica instance is up and running, the replica will try to sync with the primary. To transfer all of its data as efficiently as possible, the primary instance will produce a compacted version of the data in a snapshot (.rdb) file and send it to the replica.

    The replica will then read the snapshot file and load all of its data into memory, which will bring it to the same state the primary instance had at the moment of creating the .rdb file. When the loading stage is done, the primary instance will send the backlog of any write commands run since the snapshot was made. Finally, the primary instance will send the replica a live stream of all subsequent commands.

    By default, replication is asynchronous. This means that if you send a write command to Redis you will receive your acknowledged response first, and only then will the command be replicated to the replica.

If the primary goes down after acknowledging a write but before the write can be replicated, you might have data loss. To avoid this, the client can use the WAIT command. This command blocks the current client until all of the previous write commands are successfully transferred and acknowledged by at least a specified number of replicas.

For example, if we send the command WAIT 2 0, the server will not return a response to the client until all of the previous write commands issued on that connection have been written to at least 2 replicas. The second argument - 0 - instructs the server to block indefinitely, but we could set it to a number of milliseconds so that the command times out after a while and returns the number of replicas that successfully acknowledged the writes.
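For illustration, assuming a primary with two connected and in-sync replicas, a redis-cli session might look like this:

127.0.0.1:6379> SET foo bar
OK
127.0.0.1:6379> WAIT 2 0
(integer) 2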

    Replicas are read-only. This means that you can configure your clients to read from them, but you cannot write data to them. If you need additional read throughput, you can configure your Redis client to read from replicas as well as from your primary node. However, it's often easier just to scale out your cluster. This lets you scale reads and writes without writing any complex client logic.

    Also, you should know about Active-Active, an advanced feature of Redis Enterprise and Redis Cloud. Active-Active replicates entire databases across geographically-distributed clusters. With Active-Active, you can write locally to any replica databases, and those writes will be reflected globally. Something to keep in mind when you're really scaling out!

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/high-availability/exercise-1/index.html b/operate/redis-at-scale/high-availability/exercise-1/index.html index 8cc420dd8b..6c571a0c19 100644 --- a/operate/redis-at-scale/high-availability/exercise-1/index.html +++ b/operate/redis-at-scale/high-availability/exercise-1/index.html @@ -4,7 +4,7 @@ 3.2 Exercise - Enabling Basic Replication | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    3.2 Exercise - Enabling Basic Replication

    Step 1

    First let’s create and configure the primary instance. We’ll start with a few configuration changes in its primary.conf configuration file.

    $ touch primary.conf  # Create the configuration file

    Now open the primary.conf file with your favorite text editor and set the following configuration directives:

    # Create a strong password here
    requirepass a_strong_password

    # AUTH password of the primary instance in case this instance becomes a replica
    masterauth a_strong_password

    # Enable AOF file persistence
    appendonly yes

    # Choose a name for the AOF file
    appendfilename "primary.aof"

    Finally, let’s start the primary instance:

    $ redis-server ./primary.conf

    Step 2

    Next, let’s prepare the configuration file for the replica:

    $ touch replica.conf

    Let’s add some settings to the file we just created:

    # Port on which the replica should run
    port 6380

    # Address of the primary instance
    replicaof 127.0.0.1 6379

    # AUTH password of the primary instance
    masterauth a_strong_password

    # AUTH password for the replica instance
    requirepass a_strong_password

    And let’s start the replica:

    $ redis-server ./replica.conf

    Step 3

    Open two terminal tabs and use them to start connections to the primary and replica instances:

    # Tab 1 (primary)
    $ redis-cli
    # Tab 2 (replica)
    $ redis-cli -p 6380

    Authenticate on both tabs by running the command AUTH followed by your password:

    AUTH a_strong_password

On the second (replica) tab, run the MONITOR command, which will allow you to see every command executed against that instance.

    Go back to the first (primary) tab and execute any write command, for example

    127.0.0.1:6379> SET foo bar

    In the second tab you should see that the command was already sent to the replica:

    1617230062.389077 [0 127.0.0.1:6379] "SELECT" "0"
    1617230062.389092 [0 127.0.0.1:6379] "set" "foo" "bar"
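As an extra sanity check (not part of the original steps), you can also ask the primary for its replication status; with a healthy link the output should resemble:

127.0.0.1:6379> INFO replication
# Replication
role:master
connected_slaves:1
slave0:ip=127.0.0.1,port=6380,state=online,offset=...,lag=0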

    Step 4

    Keep the instances running, or at least their configuration files around. We’ll need them for the next exercise.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/high-availability/exercise-2/index.html b/operate/redis-at-scale/high-availability/exercise-2/index.html index 90091b45ec..b89cf6b8a7 100644 --- a/operate/redis-at-scale/high-availability/exercise-2/index.html +++ b/operate/redis-at-scale/high-availability/exercise-2/index.html @@ -4,7 +4,7 @@ 3.4 Exercise - Sentinel Hands-on | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    3.4 Exercise - Sentinel Hands-on

    Step 1

    If you still have the primary and replica instances we set up in the previous exercise (3.2) - great! We’ll reuse them to create our Sentinel setup. If not - refer back to the instructions and go through them again.

    When done, you will have a primary Redis instance with one replica.

    Step 2

    To initialise a Redis Sentinel, you need to provide a configuration file, so let’s go ahead and create one:

    $ touch sentinel1.conf

    Open the file and paste in the following settings:

    port 5000
    sentinel monitor myprimary 127.0.0.1 6379 2
    sentinel down-after-milliseconds myprimary 5000
    sentinel failover-timeout myprimary 60000
    sentinel auth-pass myprimary a_strong_password
    Breakdown of terms:
    • port - The port on which the Sentinel should run
• sentinel monitor - monitors the primary on a specific IP address and port. Given the address of the primary, the Sentinels will be able to discover all the replicas on their own. The last argument on this line is the number of Sentinels needed for quorum; in our example, the number is 2.
• sentinel down-after-milliseconds - the number of milliseconds an instance must be unreachable before it is considered down
• sentinel failover-timeout - if a Sentinel voted for another Sentinel to fail over a given primary, it will wait this many milliseconds before trying to fail over the same primary again
• sentinel auth-pass - in order for Sentinels to connect to Redis server instances that are configured with requirepass, the Sentinel configuration must include the sentinel auth-pass directive

    Step 3

Make two more copies of this file - sentinel2.conf and sentinel3.conf - and edit them so that the port configuration is set to 5001 and 5002, respectively.

    Step 4

    Let’s initialise the three Sentinels in three different terminal tabs:

    # Tab 1
    $ redis-server ./sentinel1.conf --sentinel
    # Tab 2
    $ redis-server ./sentinel2.conf --sentinel
    # Tab3
    $ redis-server ./sentinel3.conf --sentinel

    Step 5

    If you connected to one of the Sentinels now you would be able to run many new commands that would give an error if run on a Redis instance. For example:

    # Provides information about the Primary
    SENTINEL master myprimary

    # Gives you information about the replicas connected to the Primary
    SENTINEL replicas myprimary

    # Provides information on the other Sentinels
    SENTINEL sentinels myprimary

    # Provides the IP address of the current Primary
    SENTINEL get-master-addr-by-name myprimary

    Step 6

If we kill the primary Redis instance now by pressing Ctrl+C, or simulate it being unresponsive by running the redis-cli -p 6379 DEBUG sleep 30 command, we'll be able to observe in the Sentinels' logs that the failover process starts in about 5 seconds. If you run the command that returns the IP address of the primary again, you will see that the replica has been promoted to a primary:

    redis> SENTINEL get-master-addr-by-name myprimary
    1) "127.0.0.1"
    2) "6380"
    - + \ No newline at end of file diff --git a/operate/redis-at-scale/high-availability/index.html b/operate/redis-at-scale/high-availability/index.html index 6b5dd21dfe..fcd8cbb247 100644 --- a/operate/redis-at-scale/high-availability/index.html +++ b/operate/redis-at-scale/high-availability/index.html @@ -4,7 +4,7 @@ Ensuring High Availability in Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Ensuring High Availability in Redis

    Hello World!

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/high-availability/introduction/index.html b/operate/redis-at-scale/high-availability/introduction/index.html index 5341fe5622..fcd5fb9ba0 100644 --- a/operate/redis-at-scale/high-availability/introduction/index.html +++ b/operate/redis-at-scale/high-availability/introduction/index.html @@ -4,7 +4,7 @@ 3.0 Introduction to High Availability | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    3.0 Introduction to High Availability



High availability is a computing concept describing systems that guarantee a high level of uptime; such systems are designed to be fault-tolerant and highly dependable, operating continuously without intervention and without a single point of failure.

    What does this mean for Redis specifically? Well, it means that if your primary Redis server fails, a backup will kick in, and you, as a user, will see little to no disruption in the service. There are two components needed for this to be possible: replication and automatic failover.

    Replication is the continuous copying of data from a primary database to a backup, or a replica database. The two databases are usually located on different physical servers, so that we can have a functional copy of our data in case we lose the server where our primary database sits.

    But having a backup of our data is not enough for high availability. We also have to have a mechanism that will automatically kick in and redirect all requests towards the replica in the event that the primary fails. This mechanism is called automatic failover.

    In the rest of this section we’ll see how Redis handles replication and which automatic failover solutions it offers. Let’s dig in.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/high-availability/understanding-sentinels/index.html b/operate/redis-at-scale/high-availability/understanding-sentinels/index.html index bb5a0998a7..9f6abd645f 100644 --- a/operate/redis-at-scale/high-availability/understanding-sentinels/index.html +++ b/operate/redis-at-scale/high-availability/understanding-sentinels/index.html @@ -4,7 +4,7 @@ 3.3 Understanding Sentinels | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    3.3 Understanding Sentinels

    In the beginning of this unit, we learned that we can’t have high availability without replication and automatic failover. We covered replication in the previous two chapters, and now we’ll explain Sentinel - a tool that provides the automatic failover.

    Redis Sentinel is a distributed system consisting of multiple Redis instances started in sentinel mode. We call these instances Sentinels.

    The group of Sentinels monitors a primary Redis instance and its replicas. If the sentinels detect that the primary instance has failed, the sentinel processes will look for the replica that has the latest data and will promote that replica to be the new primary. This way, the clients talking to the database will be able to reconnect to the new primary and continue functioning as usual, with minimal disruption to the users.


    Sentinel Quorum Diagram

    Deciding that a primary instance is down

    In order for the Sentinels to be able to decide that a primary instance is down we need to have enough Sentinels agree that the server is unreachable from their point of view.

    Having a number of Sentinels agreeing that they need to take an action is called reaching a quorum. If the Sentinels can’t reach quorum, they cannot decide that the primary has failed. The exact number of Sentinels needed for quorum is configurable.

    Triggering a failover

    Once the Sentinels have decided that a primary instance is down, they need to elect and authorize a leader (a Sentinel instance) that will do the failover. A leader can only be chosen if the majority of the Sentinels agree on it.

In the final step, the leader will reconfigure the chosen replica to become a primary by sending the command REPLICAOF NO ONE, and it will reconfigure the other replicas to follow the newly promoted primary.
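For reference, this is the same command you could run manually against a replica to promote it:

127.0.0.1:6380> REPLICAOF NO ONE
OK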

    Sentinel and Client Libraries

    If you have a system that uses Sentinel for high availability, then you need to have a client that supports Sentinel. Not all libraries have this feature, but most of the popular ones do, so make sure you add it to your list of requirements when choosing your library.

    Further Reading

    For more information on Redis Sentinel, check out the documentation on redis.io.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/index.html b/operate/redis-at-scale/index.html index 4ae7cea014..65c35c04b8 100644 --- a/operate/redis-at-scale/index.html +++ b/operate/redis-at-scale/index.html @@ -4,7 +4,7 @@ Introduction to Running Redis at Scale | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    Introduction to Running Redis at Scale


    Profile picture for Justin Castilla
    Author:
    Justin Castilla, Senior Developer Advocate at Redis
    Profile picture for Elena Kolevska
    Author:
    Elena Kolevska, Technical Enablement Manager, EMEA at Redis
    Profile picture for Kurt Moeller
    Author:
    Kurt Moeller, Technical Enablement Manager, US at Redis

    Welcome



    The world's data is growing exponentially. That exponential growth means that database systems must scale. This is a course about running Redis, one of the most popular databases, at scale.

    So, how do you run Redis at scale? There are two general answers to this question, and it's important that we address them right away. That's because the easiest and most common way to run Redis at scale is to let someone else manage your Redis deployment for you.

    The convenience of "database-as-a-service" offerings means that you don't have to know much about how your database scales, and that saves a lot of time and potential false starts.

We at Redis offer Redis Cloud, a highly available cloud-based Redis service that provides a lot of features you can't find anywhere else, like Active-Active geo-distribution.

    Redis Cloud is also really easy to use and has a free tier so you can get going quickly. So, that's the first answer. To run Redis these days, you might just use a fully-managed offering like Redis Cloud. But not everyone can or wants to use a cloud-hosted database.

    There are a bunch of reasons for this. For example, maybe you're a large enterprise with your own data centers and dedicated ops teams. Or perhaps you're a mission-critical application whose SLAs are so rigid that you need to be able to dig deeply into any potential performance issue. This often rules out cloud-based deployments, since the cloud hides away the hardware and networks you're operating in. In this case, you're deploying Redis on your own. And for that, you need to know how Redis scales.

Learning this isn't just useful; it's also genuinely interesting. Sharding, replication, high availability, and disaster recovery are all important concepts that anyone can understand with the right explanation. These concepts aren't rocket science. They're no harder to understand than basic high school math, and knowing about them makes you a better developer. In this course, we'll look closely at how open source Redis scales. And you'll learn by doing, as we present a lot of the ideas through hands-on labs.

    These ideas will apply whether you're deploying open source Redis on your own or managing a Redis Enterprise cluster - which is, ultimately, what you'll want to reach for if you ever outgrow open source Redis. These are some important topics to consider during your time with this course. But let's first learn how to walk before we run.

We sincerely hope you enjoy what you learn with us about scaling Redis, and as always, it's our pleasure to help.

    Course Overview

    This course is broken up into units covering topics around scaling Redis for production deployment.

    Scaling means more than just performance. We have tried to identify key topics that will help you have a performant, stable, and secure deployment of Redis. This course is divided into the following units:

    • Talking to Redis: connection management and tuning of Redis.
• Persistence/Durability: options for persisting Redis data to disk.
    • High Availability: how to make sure Redis and your data is always there.
    • Scalability: scaling Redis for both higher throughput and capacity.
• Observability: visibility into your Redis deployment (metrics, etc.).

    Our goal is to give you all the information you need to run Redis at scale, in whichever way is best for your organization. We hope you enjoy the course, and please don't hesitate to reach out on the course Discord channel if you have any questions along the way.

    Prerequisites

    • Access to a Linux-based system and familiarity with it
• Redis server and redis-cli installed (examples and exercises assume redis-server is in the $PATH)
• docker and docker-compose installed
    • A git client and access to clone repos in Github. Some exercises will come from the following repository: https://github.com/redislabs-training/ru301
    note

    This repo contains sample demonstrations of Redis running in various scaled configurations, and is not directly correlated with all of the exercises in this course. See the specific exercise instructions for usage.

    Assumptions

    • Comfortable with Linux Bash shell exercises
    • Legacy terminology in Redis uses 'master' and 'slave' but in the course we will use 'primary' and 'replica'. You will still see the legacy terms in many commands, configurations, and field names.
• We will use $ to indicate a command-line prompt and > to indicate a redis-cli prompt
    - + \ No newline at end of file diff --git a/operate/redis-at-scale/observability/data-points-in-redis/index.html b/operate/redis-at-scale/observability/data-points-in-redis/index.html index 2052edc303..4b6e657457 100644 --- a/operate/redis-at-scale/observability/data-points-in-redis/index.html +++ b/operate/redis-at-scale/observability/data-points-in-redis/index.html @@ -4,7 +4,7 @@ 5.1 Data points in Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    5.1 Data points in Redis

    There are several Redis metrics that can be viewed through redis-cli.

    Redis INFO command

    Running the INFO command provides many of the metrics available in a single view.

    127.0.0.1:6379> INFO
    # Server
    redis_version:6.0.1
    redis_git_sha1:00000000
    redis_git_dirty:0
    redis_build_id:e02d1d807e41d65
    redis_mode:standalone
    os:Linux 4.19.121-linuxkit x86_64

    There are several sections that can be pulled individually. For example, if you wanted to just get the CLIENTS section you can pass that section as an argument to the INFO command.

    127.0.0.1:6379> INFO CLIENTS
    # Clients
    connected_clients:1
    client_recent_max_input_buffer:2
    client_recent_max_output_buffer:0
    blocked_clients:0
    tracking_clients:0
    clients_in_timeout_table:0

    Sections

    Server: the current Redis server info.

    Metrics of note:

    • redis_version
    • process_id
    • config_file
    • uptime_in_seconds
    • uptime_in_days

    Clients: available data on clients connected or failed connections.

    Metrics of note:

    • connected_clients
    • blocked_clients

    Memory: memory usage and stats

    Metrics of note:

    • used_memory
    • mem_fragmentation_ratio

    Persistence: RDB or AOF metrics

    Metrics of note:

    • rdb_last_save_time
    • rdb_changes_since_last_save
    • aof_rewrite_in_progress

    Stats: some general statistics

    Metrics of note:

    • keyspace_hits
    • keyspace_misses
    • expired_keys
    • evicted_keys
    • instantaneous_ops_per_sec

    Replication: replication data including primary/replica identifiers and offsets

    Metrics of note:

    • master_link_down_since
    • connected_slaves
    • master_last_io_seconds_ago

    CPU: compute consumption stats

    Metrics of note:

    • used_cpu_sys
    • used_cpu_user

    Modules: data from any loaded modules

    Metrics of note (per module):

    • ver
    • options

    Cluster: whether cluster is enabled

    Metric of note:

    • cluster_enabled

    Keyspace: keys and expiration data

    Metrics of note (per db):

    • keys
    • expires
    • avg_ttl

    The output can be read from the results or piped into a file.

$ redis-cli INFO STATS > redis-info-stats

    This could be done at intervals and consumed by a local or third party monitoring service.

Some of the data returned by INFO is static. For example, the Redis version won't change until an upgrade is performed. Other data is dynamic, such as keyspace_hits and keyspace_misses; those two can be combined into a hit ratio and observed as a long-term metric. The replication section field master_link_down_since could be a metric to attach an alert to.
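As a rough sketch (the counter values below are illustrative), you could pull both counters and compute the ratio yourself:

$ redis-cli INFO stats | grep -E "^keyspace_(hits|misses)"
keyspace_hits:1070
keyspace_misses:59

# hit ratio = keyspace_hits / (keyspace_hits + keyspace_misses) = 1070 / 1129 ≈ 0.95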

    Some examples of possible alerts that could be setup for a given metric:

Metric | Example alert
uptime_in_seconds | < 300 seconds: to ensure the server is staying up
connected_clients | < minimum number of expected application connections
master_link_down_since | > 30 seconds: replication should be operational
rdb_last_save_time | > maximum acceptable interval without taking a snapshot
    note

    This is not an exhaustive list, but just to give you an idea of how the metrics in INFO could be used.

    Latency and stats data via redis-cli options

    The redis-cli client has some built-in options that allow you to pull some real-time latency and stats data.

    note

    These are not available as commands from Redis but as options in redis-cli.

    Latency options:

    Continuously sample latency:

    $ redis-cli --latency
    min: 1, max: 17, avg: 4.03 (927 samples)

    The raw or csv output flag can be added:

    $ redis-cli --latency --csv
    1,4,1.94,78

In order to sample for longer than one second you can use --latency-history, which has a default interval of 15 seconds but can be specified using the -i param.

    $ redis-cli --latency-history -i 60
    min: 1, max: 30, avg: 4.84 (328 samples)

    This can also be combined with the csv or raw output format flag.

    $ redis-cli --latency-history -i 60 --csv
    13,13,13.00,1
    5,13,9.00,2
    3,13,7.00,3
    3,13,6.00,4
    3,13,5.60,5
    2,13,5.00,6
    2,13,5.43,7
    2,13,5.62,8
    2,13,5.22,9
    2,13,5.00,10
    1,13,4.64,11

    Both of these could be piped to a file as well.

The --latency-dist option shows latency as a spectrum. The default interval is one second but can be changed using the -i param.

[Latency Distribution diagram]

Stats option:

Get rolling stats from the server using the --stat flag.

    $ redis-cli --stat
    ------- data ------ --------------------- load -------------------- - child -
    keys mem clients blocked requests connections
    4 9.98M 51 0 8168035 (+0) 4132
    4 9.98M 51 0 8181542 (+13507) 4132
    4 9.98M 51 0 8196100 (+14558) 4132
    4 9.98M 51 0 8209794 (+13694) 4132
    4 9.98M 51 0 8223420 (+13626) 4132
    4 9.98M 51 0 8236624 (+13204) 4132
    4 9.98M 51 0 8251376 (+14752) 4132
    4 9.98M 51 0 8263417 (+12041) 4182
    4 9.98M 51 0 8276781 (+13364) 4182
    4 9.90M 51 0 8289693 (+12912) 4182

    Memory stats

Redis includes a MEMORY command with a STATS subcommand for retrieving memory statistics.

    127.0.0.1:6379> memory stats
    1) "peak.allocated"
    2) (integer) 11912984
    3) "total.allocated"
    4) (integer) 8379168
    5) "startup.allocated"
    6) (integer) 5292168
    7) "replication.backlog"
    8) (integer) 0
    9) "clients.slaves"
    10) (integer) 0
    11) "clients.normal"
    12) (integer) 16986
    13) "aof.buffer"
    14) (integer) 0

    These values are available in the INFO MEMORY command as well, but here they are returned in a typical Redis RESP Array reply.

There is also a MEMORY DOCTOR subcommand that produces an analysis report of the current memory metrics.

    Latency Monitoring

As we know, Redis is fast and, as a result, is often used in extreme scenarios where low latency is a must. Redis has a feature called Latency Monitoring, which allows you to dig into possible latency issues. Latency monitoring is composed of the following conceptual parts:

    • Latency hooks that sample different latency sensitive code paths.
    • Time series recording of latency spikes split by different events.
    • A reporting engine to fetch raw data from the time series.
    • Analysis engine to provide human readable reports and hints according to the measurements.

By default this feature is disabled because most of the time it is not needed. To enable it, set the threshold time in milliseconds that you want to monitor in your Redis configuration. Events that take longer than the threshold will be logged as latency spikes. For example, if the requirement is to identify all events blocking the server for 10 milliseconds or more, the threshold should be set to 10:

    latency-monitor-threshold 10

    If the debugging session is intended to be temporary the threshold can be set via redis-cli.

    127.0.0.1:6379> CONFIG SET latency-monitor-threshold 10

    To disable the latency framework the threshold should be set back to 0.

    127.0.0.1:6379> CONFIG SET latency-monitor-threshold 0

The latency data can be viewed using the LATENCY command with its subcommands:

    • LATENCY LATEST - latest samples for all events
    • LATENCY HISTORY - latest time series for a given event
    • LATENCY RESET - resets the time series data
    • LATENCY GRAPH - renders an ASCII-art graph
    • LATENCY DOCTOR - analysis report

In order to make use of these commands you need to familiarize yourself with the different events that the latency monitoring framework tracks (taken from https://redis.io/topics/latency-monitor):

Event | Description
command | regular commands
fast-command | O(1) and O(log N) commands
fork | the fork(2) system call
rdb-unlink-temp-file | the unlink(2) system call
aof-write | writing to the AOF, a catch-all event for fsync(2) system calls
aof-fsync-always | the fsync(2) system call when invoked by the appendfsync always policy
aof-write-pending-fsync | the fsync(2) system call when there are pending writes
aof-write-active-child | the fsync(2) system call when performed by a child process
aof-write-alone | the fsync(2) system call when performed by the main process
aof-fstat | the fstat(2) system call
aof-rename | the rename(2) system call for renaming the temporary file after completing BGREWRITEAOF
aof-rewrite-diff-write | writing the differences accumulated while performing BGREWRITEAOF
active-defrag-cycle | the active defragmentation cycle
expire-cycle | the expiration cycle
eviction-cycle | the eviction cycle
eviction-del | deletes during the eviction cycle

    For example, you can use the LATENCY LATEST subcommand and you may see some data like this:

    127.0.0.1:6379> latency latest
    1) 1) "command"
    2) (integer) 1616372606
    3) (integer) 600
    4) (integer) 600
    2) 1) "fast-command"
    2) (integer) 1616372434
    3) (integer) 12
    4) (integer) 12

The results of this command provide the timestamp, latency, and max latency for each event. Using the events table above, we can see that a regular command had a latency spike with a latest and max latency of 600 ms, while an O(1) or O(log N) command had a latency spike of 12 ms.

    Some of the latency commands require a specific event be passed.

    127.0.0.1:6379> latency graph command
    command - high 600 ms, low 100 ms (all time high 600 ms)
    --------------------------------------------------------------------------------
    _##
    o|||
    o||||
    _#|||||

    3222184
    05308ss
    sssss

While the cost of enabling latency monitoring is near zero and its memory requirements are very small, it will raise your baseline memory usage; if you are already getting the required performance out of Redis, there is no need to leave it enabled.

    Monitoring Tools

    There are many open source monitoring tools and services to visualize your Redis metrics - some of which also provide alerting capabilities.

    One example of this is the Redis Data Source for Grafana. It is a Grafana plug-in that allows users to connect to the Redis database and build dashboards to easily observe Redis data. It provides an out-of-the-box predefined dashboard but also lets you build customized dashboards tuned to your specific needs.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/observability/exercise-1/index.html b/operate/redis-at-scale/observability/exercise-1/index.html index 9b6be71379..0d1e836c9c 100644 --- a/operate/redis-at-scale/observability/exercise-1/index.html +++ b/operate/redis-at-scale/observability/exercise-1/index.html @@ -4,7 +4,7 @@ 5.2 Getting Redis Statistics | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    5.2 Getting Redis Statistics

    Clone this repo if you have not already: https://github.com/redislabs-training/ru301

    Change into the observability-stats directory.

    Requirements
    • docker
    • docker-compose
    • internet connection
    Starting Environment
    $ docker-compose up -d
    Connect to the Environment

    In a terminal run this command to get a shell prompt inside the running Docker container:

    $ docker-compose exec redis_stats bash
    Generate load

A simple way to generate some load is to open another terminal and run:

    $ docker-compose exec redis_stats redis-benchmark
    Info

Since most of the stats data comes from the INFO command, you should first run it to view the available data.

    $ redis-cli INFO

    Try piping this output to a file.

    Memory usage

Since we generally recommend setting the maxmemory size, it is possible to calculate the percentage of memory in use, and alert on it, based on the maxmemory configuration value and the used_memory stat.

    First set the maxmemory.

    $ redis-cli config set maxmemory 100000

    Then you can pull the two data points to see how that could be used to calculate memory usage.

    $ redis-cli INFO | grep used_memory:
    $ redis-cli CONFIG GET maxmemory
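As a rough sketch of that calculation in the shell (assuming maxmemory is non-zero, as set above):

$ used=$(redis-cli INFO memory | grep '^used_memory:' | cut -d: -f2 | tr -d '\r')
$ max=$(redis-cli CONFIG GET maxmemory | tail -1)
$ echo "scale=2; $used * 100 / $max" | bc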
    Client data

    You can pull the clients section of the INFO command:

    $ redis-cli info clients

    or maybe a particular metric you would want to track:

    $ redis-cli info clients | grep connected_clients
    Stats section

    Use redis-cli to list the full 'stats' section.

    Hit ratio

    A cache hit/miss ratio could be generated using two data points in the stats section.

    $ redis-cli INFO stats | grep keyspace
    Evicted keys

Eviction occurs when Redis has reached its maximum memory and maxmemory-policy in redis.conf is set to something other than noeviction.

    $ redis-cli INFO stats | grep evicted_keys
    Expired keys

    It is a good idea to keep an eye on the expirations to make sure Redis is performing as expected.

    $ redis-cli INFO stats | grep expired_keys
    Keyspace

    The following data could be used for graphing the size of the keyspace as a quick drop or spike in the number of keys is a good indicator of issues.

    $ redis-cli INFO keyspace
    Workload (connections received, commands processed)

    The following stats are a good indicator of workload on the Redis server.

    $ redis-cli INFO stats | egrep "^total_"
    - + \ No newline at end of file diff --git a/operate/redis-at-scale/observability/identifying-issues/index.html b/operate/redis-at-scale/observability/identifying-issues/index.html index 5d8720324f..af4b32a6f6 100644 --- a/operate/redis-at-scale/observability/identifying-issues/index.html +++ b/operate/redis-at-scale/observability/identifying-issues/index.html @@ -4,7 +4,7 @@ 5.3 Identifying Issues | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    5.3 Identifying Issues

Besides the data points from INFO, MEMORY STATS, and the latency framework covered in the sections above, you may need to pull data from other sources when troubleshooting.

    Availability

    The Redis server will respond to the PING command when running properly:

    $ redis-cli -h redis.example.com -p 6379 PING
    PONG
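A minimal availability check could wrap this in a shell one-liner (illustrative, not from the original):

$ redis-cli -h redis.example.com -p 6379 PING | grep -q PONG && echo "redis is up" || echo "redis is down"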

    Slow Log

Redis Slow Log is a system to log queries that exceed a specified execution time, which does not include I/O operations like client communication. It is enabled by default, with two configuration parameters.

    slowlog-log-slower-than 1000000

This indicates that any command whose execution time exceeds the given number of microseconds, in this case one second, will be logged. The slow log can be disabled using a value of -1. It can also be set to log every command with a value of 0.

    slowlog-max-len 128

    This sets the length of the slow log. When a new command is logged the oldest one is removed from the queue.

    These values can also be changed at runtime using the CONFIG SET command.
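For example, to lower the threshold to 10 milliseconds and keep a longer history at runtime (the values here are illustrative):

redis.cloud:6379> CONFIG SET slowlog-log-slower-than 10000
OK
redis.cloud:6379> CONFIG SET slowlog-max-len 256
OK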

    You can view the current length of the slow log using the LEN subcommand:

    redis.cloud:6379> slowlog len
    (integer) 11

    Entries can be pulled off of the slow log using the GET subcommand.

    redis.cloud:6379> slowlog get 2
    1) 1) (integer) 10
    2) (integer) 1616372606
    3) (integer) 600406
    4) 1) "debug"
    2) "sleep"
    3) ".6"
    5) "172.17.0.1:60546"
    6) ""
    2) 1) (integer) 9
    2) (integer) 1616372602
    3) (integer) 600565
    4) 1) "debug"
    2) "sleep"
    3) ".6"
    5) "172.17.0.1:60546"
    6) ""

    The slow log can be reset using the RESET subcommand.

    redis.cloud:6379> slowlog reset
    OK
    redis.cloud:6379> slowlog len
    (integer) 0

    Scanning keys

There are a few options that can be passed to redis-cli that will trigger a keyspace analysis. They use the SCAN command, so they should be safe to run without impacting operations. As the output of each notes, there is a throttling option if needed.


    Big Keys: This option will scan the dataset for big keys and provide information about them.

    $ redis-cli --bigkeys

    # Scanning the entire keyspace to find biggest keys as well as
    # average sizes per key type. You can use -i 0.1 to sleep 0.1 sec
    # per 100 SCAN commands (not usually needed).

    [00.00%] Biggest string found so far '"counter:__rand_int__"' with 6 bytes
    [00.00%] Biggest hash found so far '"myhash"' with 1 fields
    [00.00%] Biggest list found so far '"mylist"' with 200000 items

    -------- summary -------

    Sampled 4 keys in the keyspace!
    Total key length in bytes is 48 (avg len 12.00)

    Biggest list found '"mylist"' has 200000 items
    Biggest hash found '"myhash"' has 1 fields
    Biggest string found '"counter:__rand_int__"' has 6 bytes

    1 lists with 200000 items (25.00% of keys, avg size 200000.00)
    1 hashs with 1 fields (25.00% of keys, avg size 1.00)
    2 strings with 9 bytes (50.00% of keys, avg size 4.50)
    0 streams with 0 entries (00.00% of keys, avg size 0.00)
    0 sets with 0 members (00.00% of keys, avg size 0.00)
    0 zsets with 0 members (00.00% of keys, avg size 0.00)

Mem Keys: Similar to big keys, mem keys looks for the biggest keys, but it measures them by memory usage (via the MEMORY USAGE command) rather than by number of elements, and also reports average sizes per key type.

    $ redis-cli --memkeys

    # Scanning the entire keyspace to find biggest keys as well as
    # average sizes per key type. You can use -i 0.1 to sleep 0.1 sec
    # per 100 SCAN commands (not usually needed).

    [00.00%] Biggest string found so far '"counter:__rand_int__"' with 62 bytes
    [00.00%] Biggest string found so far '"key:__rand_int__"' with 63 bytes
    [00.00%] Biggest hash found so far '"myhash"' with 86 bytes
    [00.00%] Biggest list found so far '"mylist"' with 860473 bytes

    -------- summary -------

    Sampled 4 keys in the keyspace!
    Total key length in bytes is 48 (avg len 12.00)

    Biggest list found '"mylist"' has 860473 bytes
    Biggest hash found '"myhash"' has 86 bytes
    Biggest string found '"key:__rand_int__"' has 63 bytes

    1 lists with 860473 bytes (25.00% of keys, avg size 860473.00)
    1 hashs with 86 bytes (25.00% of keys, avg size 86.00)
    2 strings with 125 bytes (50.00% of keys, avg size 62.50)
    0 streams with 0 bytes (00.00% of keys, avg size 0.00)
    0 sets with 0 bytes (00.00% of keys, avg size 0.00)
    0 zsets with 0 bytes (00.00% of keys, avg size 0.00)

Hot Keys: The hot keys scan is only available when the maxmemory-policy is set to volatile-lfu or allkeys-lfu. If you need to identify hot keys, you can add this argument to redis-cli.

    $ redis-cli --hotkeys

    # Scanning the entire keyspace to find hot keys as well as
    # average sizes per key type. You can use -i 0.1 to sleep 0.1 sec
    # per 100 SCAN commands (not usually needed).

    [00.00%] Hot key '"key:__rand_int__"' found so far with counter 37

    -------- summary -------

    Sampled 5 keys in the keyspace!
    hot key found with counter: 37 keyname: "key:__rand_int__"

    Monitor: The MONITOR command allows you to see a stream of every command running against your Redis instance.

127.0.0.1:6379> monitor
    OK
    1616541192.039933 [0 127.0.0.1:57070] "PING"
    1616541276.052331 [0 127.0.0.1:57098] "set" "user:2398423hu" "KutMo"
    caution

    Since MONITOR streams back all commands, its use comes at a cost. It has been known to reduce performance by up to 50% so use with caution!

    Setting up and using the Redis Log File

    The Redis log file is the other important log you need to be aware of. It contains useful information for troubleshooting configuration and deployment errors. If you don't configure Redis logging, troubleshooting will be significantly harder.

Redis has four logging levels, which you can configure directly in the redis.conf file.

    Log Levels:

    • WARNING
    • NOTICE
    • VERBOSE
    • DEBUG

    Redis also supports sending the log files to a remote logging server through the use of syslog.

Remote logging is important to many security professionals. These remote logging servers are frequently used to monitor security events and manage incidents. Centralized log servers perform three common functions: ensuring the integrity of your log files, retaining logs for a specific period of time, and correlating logs against other system logs to discover potential attacks on your infrastructure.

    Let's set up logging on our Redis deployment. First we'll open our redis.conf file:

    $ sudo vi /etc/redis/redis.conf

    The redis.conf file has an entire section dedicated to logging.

First, find the logfile directive in the redis.conf file. This will allow you to define the logging directory. For this example, let's use /var/log/redis/redis.log.

    If you'd like to use a remote logging server, then you'll need to uncomment the lines syslog-enabled, syslog-ident and syslog-facility, and ensure that syslog-enabled is set to yes.
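Putting it together, the logging-related lines in redis.conf would look something like this (the syslog-ident and syslog-facility values shown are the usual defaults; adjust them to your environment):

logfile /var/log/redis/redis.log
syslog-enabled yes
syslog-ident redis
syslog-facility local0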

    Next, we'll restart the Redis server.

    You should see the log events indicating that Redis is starting.

    $ sudo tail -f /var/log/redis/redis.log

Next, let's check that we are properly writing to syslog. You should see these same log entries there.

$ grep redis /var/log/syslog

    Finally, you’ll need to send your logs to your remote logging server to ensure your logs will be backed up to this server. To do this, you’ll also have to modify the rsyslog configuration. This configuration varies depending on your remote logging server provider.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/observability/index.html b/operate/redis-at-scale/observability/index.html index dc51461964..3a375206ca 100644 --- a/operate/redis-at-scale/observability/index.html +++ b/operate/redis-at-scale/observability/index.html @@ -4,7 +4,7 @@ Ensuring Observability in Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Ensuring Observability in Redis

    Hello World! Observability

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/observability/introduction/index.html b/operate/redis-at-scale/observability/introduction/index.html index 8613499093..a322842a40 100644 --- a/operate/redis-at-scale/observability/introduction/index.html +++ b/operate/redis-at-scale/observability/introduction/index.html @@ -4,7 +4,7 @@ 5.0 Introduction to Observability | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    5.0 Introduction to Observability



    The last thing you want to do after successfully deploying and scaling Redis is to be stuck working on the weekend because performance is down or the service is unavailable!

    If you're running a managed service like Redis Cloud, you won't have to worry about these questions as much. But even then, it's still worthwhile to know about certain key Redis metrics.

Some of the questions you always want to be able to answer include:

    • Is Redis up and running right now?
    • Where is my Redis capacity at?
    • Is Redis accessible at this moment?
    • Is Redis performing the way we expect?
    • When failures occur… what exactly happened to Redis?

    Then of course you must ask...

    • How can I find this out ahead of time?

    Let's dig into these questions and more as we look into observability with Redis.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/persistence-and-durability/exercise/index.html b/operate/redis-at-scale/persistence-and-durability/exercise/index.html index 131bdd5530..c2c4ce27e3 100644 --- a/operate/redis-at-scale/persistence-and-durability/exercise/index.html +++ b/operate/redis-at-scale/persistence-and-durability/exercise/index.html @@ -4,7 +4,7 @@ 2.2 Exercise: Saving a Snapshot | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    2.2 Exercise: Saving a Snapshot

    As we learned in the previous unit, Redis will save a snapshot of your database every hour if at least one key has changed, every five minutes if at least 100 keys have changed, or every 60 seconds if at least 10000 keys have changed.

    Let’s update this to a simplified hypothetical scenario where we want to save a snapshot if three keys have been modified in 20 seconds.

    Step 1

    Create a directory named 2.2 and in it prepare a redis.conf file.

    $ mkdir 2.2
    $ cd 2.2
    $ vim redis.conf

    The redis.conf file should specify a filename that will be used for the rdb file and a directive that will trigger the creation of a snapshot if 3 keys have been modified in 20 seconds, as described above.

    dbfilename my_backup_file.rdb
    save 20 3

    Step 2

    In the 2.2 directory, start a Redis server - passing it the redis.conf configuration file you just created.

    $ redis-server ./redis.conf

    In a separate terminal tab use the redis-cli to create three random keys, one after the other. For example:

    127.0.0.1:6379> SET a 1
    127.0.0.1:6379> SET b 2
    127.0.0.1:6379> SET c 3

    Run the ls command in the first terminal to list all the files in the 2.2 directory. What changed?

    Step 3

    Now we’re ready to take our persistence a level higher and set up an AOF file. Modify your redis.conf file so that the server will log every new write command and force writing it to disk.

    Be careful! We have a running server and we want this configuration to be applied without restarting it.

    127.0.0.1:6379> CONFIG SET appendonly yes
    127.0.0.1:6379> CONFIG SET appendfsync always

    In order for these settings to be persisted to the redis.conf file we need to save them:

    127.0.0.1:6379> CONFIG REWRITE

    Step 4

    Create a few random keys through redis-cli. Check the contents of the directory 2.2 again. What changed?

    Step 5

    As a final step, restart the Redis server process (you can press Ctrl+C in the terminal to stop the process and re-run it again). If you run the SCAN 0 command you will see that all the keys you created are still in the database, even though we restarted the process.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/persistence-and-durability/index.html b/operate/redis-at-scale/persistence-and-durability/index.html index cbc030a49e..37b3088c80 100644 --- a/operate/redis-at-scale/persistence-and-durability/index.html +++ b/operate/redis-at-scale/persistence-and-durability/index.html @@ -4,7 +4,7 @@ Ensuring Persistence & Durability in Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Ensuring Persistence & Durability in Redis

    Hello World! Persistence & Durability

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/persistence-and-durability/introduction/index.html b/operate/redis-at-scale/persistence-and-durability/introduction/index.html index 2d8f720b1c..3aa853cbf0 100644 --- a/operate/redis-at-scale/persistence-and-durability/introduction/index.html +++ b/operate/redis-at-scale/persistence-and-durability/introduction/index.html @@ -4,7 +4,7 @@ 2.0 Introduction to Persistence and Durability | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    2.0 Introduction to Persistence and Durability



    Hello! Congrats on completing Section 1. Section 2 is a bit shorter but contains some important information on persistence and durability.

    As I am sure you know, Redis serves all data directly from memory. But Redis is also capable of persisting data to disk. Persistence preserves data in the event of a server restart.

    In the following video and exercise, we'll look at the options for persisting data to disk. We'll show you how to enable persistence, and you'll then do a hands-on exercise setting up snapshots of your Redis instance.

    Good luck, and we'll see you in the next sections.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/persistence-and-durability/persistence-options-in-redis/index.html b/operate/redis-at-scale/persistence-and-durability/persistence-options-in-redis/index.html index 37c0bbf2a2..bce1da0ee2 100644 --- a/operate/redis-at-scale/persistence-and-durability/persistence-options-in-redis/index.html +++ b/operate/redis-at-scale/persistence-and-durability/persistence-options-in-redis/index.html @@ -4,7 +4,7 @@ 2.1 Persistence options in Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    2.1 Persistence options in Redis



If a Redis server that only stores data in RAM is restarted, all data is lost. To prevent such data loss, there needs to be some mechanism for persisting the data to disk; Redis provides two of them: snapshotting and the append-only file, or AOF. You can configure your Redis instances to use either of the two, or a combination of both.

    When a snapshot is created, the entire point-in-time view of the dataset is written to persistent storage in a compact .rdb file. You can set up recurring backups, for example every 1, 12, or 24 hours and use these backups to easily restore different versions of the data set in case of disasters. You can also use these snapshots to create a clone of the server, or simply leave them in place for a future restart.

    Creating a .rdb file requires a lot of disk I/O. If performed in the main Redis process, this would reduce the server’s performance. That’s why this work is done by a forked child process. But even forking can be time-consuming if the dataset is large. This may result in decreased performance or in Redis failing to serve clients for a few milliseconds or even up to a second for very large datasets. Understanding this should help you decide whether this solution makes sense for your requirements.

You can configure the name and location of the .rdb file with the dbfilename and dir configuration directives, either through the redis.conf file, or through the redis-cli as explained in Section 1 Unit 2. And of course you can configure how often you want to create a snapshot. Here's a representative excerpt from a redis.conf file:
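# Snapshot filename and directory (the values shown are the stock defaults)
dbfilename dump.rdb
dir ./

# save <seconds> <changes>
save 60 1000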

As an example, this configuration will make Redis automatically dump the dataset to disk every 60 seconds if at least 1000 keys changed in that period. While snapshotting is a great strategy for the use cases explained above, it leaves a real window for data loss. You can configure snapshots to run every few minutes, or after X writes against the database, but if the server crashes you lose all the writes since the last snapshot was taken. In many use cases that kind of data loss can be acceptable, but in many others it is absolutely not. For all of those other use cases, Redis offers the AOF persistence option.

AOF, or the append-only file, works by logging every incoming write command to disk as it happens. These commands can then be replayed at server startup to reconstruct the original dataset. Commands are logged using the same format as the Redis protocol itself, in an append-only fashion. The AOF approach provides greater durability than snapshotting, and allows you to configure how often file syncs happen.

    Depending on your durability requirements (or how much data you can afford to lose), you can choose which fsync policy is the best for your use case:

    • fsync every write: The safest policy: The write is acknowledged to the client only after it has been written to the AOF file and flushed to disk. Since in this approach we are writing to disk synchronously, we can expect a much higher latency than usual.
    • fsync every second: The default policy. Fsync is performed asynchronously, in a background thread, so write performance is still high. Choose this option if you need high performance and can afford to lose up to one second worth of writes.
    • no fsync: In this case Redis will log the command to the file descriptor, but will not force the OS to flush the data to disk. If the OS crashes we can lose a few seconds of data (Normally Linux will flush data every 30 seconds with this configuration, but it's up to the kernel’s exact tuning.).

The relevant configuration directives for AOF are appendonly, which turns AOF on, and appendfsync, which selects the fsync policy. AOF contains a log of all the operations that modified the database, in a format that's easy to understand and parse. When the file gets too big, Redis can automatically rewrite it in the background, compacting it so that only the latest state of the data is preserved.
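A minimal sketch of those directives in redis.conf, using the default every-second policy described above:

appendonly yes
appendfsync everysec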

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/scalability/exercise-1/index.html b/operate/redis-at-scale/scalability/exercise-1/index.html index 4e23a95b6e..e91ec90570 100644 --- a/operate/redis-at-scale/scalability/exercise-1/index.html +++ b/operate/redis-at-scale/scalability/exercise-1/index.html @@ -4,7 +4,7 @@ 4.1 Exercise - Creating a Redis Cluster | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    4.1 Exercise - Creating a Redis Cluster

    Step 1

    To create a cluster, we need to spin up a few empty Redis instances and configure them to run in cluster mode.

    Here’s a minimal configuration file for Redis Cluster:

    # redis.conf file
    port 7000
    cluster-enabled yes
    cluster-config-file nodes.conf
    cluster-node-timeout 5000
    appendonly yes

    On the first line we specify the port on which the server should run, then we state that we want the server to run in cluster mode, with the cluster-enabled yes directive. cluster-config-file defines the name of the file where the configuration for this node is stored, in case of a server restart. Finally, cluster-node-timeout is the number of milliseconds a node must be unreachable for it to be considered in failure state.

    Step 2

Let’s create a cluster on your localhost with three primary shards and three replicas (remember: in production, always use two replicas per primary to protect against a split-brain situation). We’ll need to bring up six Redis processes and create a redis.conf file for each of them, specifying their port and the rest of the configuration directives above.

    First, create six directories:

    mkdir -p {7000..7005}

    Step 3

    Then create the minimal configuration redis.conf file from above in each one of them, making sure you change the port directive to match the directory name.

    To copy the initial redis.conf file to each folder, run the following:

    for i in {7000..7005}; do cp redis.conf $i; done
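If you'd rather script the port change too, a loop like this works (assuming GNU sed and that each file has a port directive on its own line):

for i in {7000..7005}; do sed -i "s/^port .*/port $i/" $i/redis.conf; done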

    You should end up with the following directory structure:

- 7000
   - redis.conf
- 7001
   - redis.conf
- 7002
   - redis.conf
- 7003
   - redis.conf
- 7004
   - redis.conf
- 7005
   - redis.conf

    Step 4

    Open six terminal tabs and start the servers by going into each one of the directories and starting a Redis instance:

    # Terminal tab 1
    cd 7000
    /path/to/redis-server ./redis.conf
    # Terminal tab 2
    cd 7001
    /path/to/redis-server ./redis.conf
    ... and so on.

    Step 5

    Now that you have six empty Redis servers running, you can join them in a cluster:

    redis-cli --cluster create 127.0.0.1:7000 127.0.0.1:7001 \
    127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 \
    --cluster-replicas 1

Here we list the ports and IP addresses of all six servers, and use the create command to instruct Redis to join them in a cluster, creating one replica for each primary. redis-cli will propose a configuration; accept it by typing yes. The cluster will be configured and joined, which means the instances will be bootstrapped into talking with each other.

    Finally, you should see a message saying:

    [OK] All 16384 slots covered

This means that every one of the 16384 available hash slots is being served by a primary (master) instance.

    Step 6

    Let’s add a new shard to the cluster, which is something you might do when you need to scale.

    First, as before, we need to start two new empty Redis instances (primary and its replica) in cluster mode. We create new directories 7006 and 7007 and in them we copy the same redis.conf file we used before, making sure we change the port directive in them to the appropriate port (7006 and 7007).

    $ mkdir 7006 7007
    $ cp 7000/redis.conf 7006/redis.conf
    $ cp 7000/redis.conf 7007/redis.conf

    Update the port numbers in the files ./7006/redis.conf and ./7007/redis.conf to 7006 and 7007, respectively.

    Step 7

    Let’s start the Redis instances:

    # Terminal tab 7
    $ cd 7006
    $ redis-server ./redis.conf
    # Terminal tab 8
    $ cd 7007
    $ redis-server ./redis.conf

    Step 8

    In the next step we join the new primary shard to the cluster with the add-node command. The first parameter is the address of the new shard, and the second parameter is the address of any of the current shards in the cluster.

    redis-cli --cluster add-node 127.0.0.1:7006 127.0.0.1:7000
    note

    The Redis commands use the term “Nodes” for what we call “Shards” in this training, so a command named “add-node” would mean “add a shard”.

    Step 9

    Finally we need to join the new replica shard, with the same add-node command, and a few extra arguments indicating the shard is joining as a replica and what will be its primary shard. If we don’t specify a primary shard Redis will assign one itself.

    We can find the IDs of our shards by running the cluster nodes command on any of the shards:

    $ redis-cli -p 7000 cluster nodes
    46a768cfeadb9d2aee91ddd882433a1798f53271 127.0.0.1:7006@17006 master - 0 1616754504000 0 connected
    1f2bc068c7ccc9e408161bd51b695a9a47b890b2 127.0.0.1:7003@17003 slave a138f48fe038b93ea2e186e7a5962fb1fa6e34fa 0 1616754504551 3 connected
    5b4e4be56158cf6103ffa3035024a8d820337973 127.0.0.1:7001@17001 master - 0 1616754505584 2 connected 5461-10922
    a138f48fe038b93ea2e186e7a5962fb1fa6e34fa 127.0.0.1:7002@17002 master - 0 1616754505000 3 connected 10923-16383
    71e078dab649166dcbbcec51520742bc7a5c1992 127.0.0.1:7005@17005 slave 5b4e4be56158cf6103ffa3035024a8d820337973 0 1616754505584 2 connected
    f224ecabedf39d1fffb34fb6c1683f8252f3b7dc 127.0.0.1:7000@17000 myself,master - 0 1616754502000 1 connected 0-5460
    04d71d5eb200353713da475c5c4f0a4253295aa4 127.0.0.1:7004@17004 slave f224ecabedf39d1fffb34fb6c1683f8252f3b7dc 0 1616754505896 1 connected

The port of the primary shard we added in the last step was 7006, and we can see it on the first line. Its ID is 46a768cfeadb9d2aee91ddd882433a1798f53271.

    The resulting command is:

    $ redis-cli -p 7000 --cluster add-node 127.0.0.1:7007 127.0.0.1:7000 --cluster-slave --cluster-master-id 46a768cfeadb9d2aee91ddd882433a1798f53271

The --cluster-slave flag indicates that the shard should join as a replica, and --cluster-master-id 46a768cfeadb9d2aee91ddd882433a1798f53271 specifies which primary shard it should replicate.

    Step 10

Now our cluster has eight shards (four primaries and four replicas), but if we run the cluster slots command we’ll see that the newly added shards don’t host any hash slots and, thus, no data. Let’s assign some hash slots to them:

$ redis-cli -p 7000 --cluster reshard 127.0.0.1:7000

    We use the command reshard and the address of any shard in the cluster as an argument here. In the next step we’ll be able to choose the shards we’ll be moving slots from and to.

The first question you’ll get is about the number of slots you want to move. We have 16384 slots in total and four primary shards, so let’s move a quarter of all slots, so that the data is distributed equally. 16384 ÷ 4 is 4096, so let’s use that number.

The next question is about the receiving shard ID: the ID of the primary shard we want to move the data to, which we learned how to get in the previous step with the cluster nodes command.

    Finally, we need to enter the IDs of the shards we want to copy data from. Alternatively, we can type “all” and the shard will move a number of hash slots from all available primary shards.

    $ redis-cli -p 7000 --cluster reshard 127.0.0.1:7000
    ....
    ....
    ....

    How many slots do you want to move (from 1 to 16384)? 4096
    What is the receiving node ID? 46a768cfeadb9d2aee91ddd882433a1798f53271
    Please enter all the source node IDs.
    Type 'all' to use all the nodes as source nodes for the hash slots.
    Type 'done' once you entered all the source nodes IDs.
    Source node #1: all

    Ready to move 4096 slots.
Source nodes:
M: f224ecabedf39d1fffb34fb6c1683f8252f3b7dc 127.0.0.1:7000
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: 5b4e4be56158cf6103ffa3035024a8d820337973 127.0.0.1:7001
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
M: a138f48fe038b93ea2e186e7a5962fb1fa6e34fa 127.0.0.1:7002
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
Destination node:
M: 46a768cfeadb9d2aee91ddd882433a1798f53271 127.0.0.1:7006
   slots: (0 slots) master
   1 additional replica(s)
Resharding plan:
   Moving slot 5461 from 5b4e4be56158cf6103ffa3035024a8d820337973
   Moving slot 5462 from 5b4e4be56158cf6103ffa3035024a8d820337973

    Do you want to proceed with the proposed reshard plan (yes/no)?
    Moving slot 5461 from 127.0.0.1:7001 to 127.0.0.1:7006:
    Moving slot 5462 from 127.0.0.1:7001 to 127.0.0.1:7006:
    Moving slot 5463 from 127.0.0.1:7001 to 127.0.0.1:7006:
    ....
    ....
    ....

    Once the command finishes we can run the cluster slots command again and we’ll see that our new primary and replica shards have been assigned some hash slots:

    $ redis-cli -p 7000 cluster slots
    - + \ No newline at end of file diff --git a/operate/redis-at-scale/scalability/index.html b/operate/redis-at-scale/scalability/index.html index 73bd0cd1e0..ff055ecc47 100644 --- a/operate/redis-at-scale/scalability/index.html +++ b/operate/redis-at-scale/scalability/index.html @@ -4,7 +4,7 @@ Ensuring Scalability in Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Ensuring Scalability in Redis

    Hello World! Scalability

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/scalability/lustering-in-redis/index.html b/operate/redis-at-scale/scalability/lustering-in-redis/index.html index a609a277ed..6d80c0e189 100644 --- a/operate/redis-at-scale/scalability/lustering-in-redis/index.html +++ b/operate/redis-at-scale/scalability/lustering-in-redis/index.html @@ -4,7 +4,7 @@ 4.0 Clustering In Redis | The Home of Redis Developers - + @@ -13,7 +13,7 @@

    4.0 Clustering In Redis


    Profile picture for Justin Castilla
    Author:
    Justin Castilla, Senior Developer Advocate at Redis



    Before we jump into the details, let's first address the elephant in the room: DBaaS offerings, or "database-as-a-service" in the cloud. No doubt, it's useful to know how Redis scales and how you might deploy it. But deploying and maintaining a Redis cluster is a fair amount of work. So if you don't want to deploy and manage Redis yourself, then consider signing up for Redis Cloud, our managed service, and let us do the scaling for you. Of course, that route is not for everyone. And as I said, there's a lot to learn here, so let's dive in.

    We'll start with scalability. Here's one definition:

    “Scalability is the property of a system to handle a growing amount of work by adding resources to the system.” Wikipedia

The two most common scaling strategies are vertical scaling and horizontal scaling. Vertical scaling, also called “scaling up”, means adding more resources like CPU or memory to your server. Horizontal scaling, or “scaling out”, means adding more servers to your pool of resources. It's the difference between just getting a bigger server and deploying a whole fleet of servers.

    Let's take an example. Suppose you have a server with 128 GB of RAM, but you know that your database will need to store 300 GB of data. In this case, you’ll have two choices: you can either add more RAM to your server so it can fit the 300GB dataset, or you can add two more servers and split the 300GB of data between the three of them. Hitting your server’s RAM limit is one reason you might want to scale up, or out, but reaching the performance limit in terms of throughput, or operations per second, is also an indicator that scaling is necessary.

Since Redis is mostly single-threaded, it cannot make use of the multiple cores of your server’s CPU for command processing. But if we split the data between two Redis servers, our system can process requests in parallel, nearly doubling the throughput. In fact, performance will scale close to linearly as more Redis servers are added to the system. This database architectural pattern of splitting data between multiple servers for the purpose of scaling is called sharding. The resulting servers that hold chunks of the data are called shards.

    This performance increase sounds amazing, but it doesn’t come without some cost: if we divide and distribute our data across two shards, which are just two Redis server instances, how will we know where to look for each key? We need to have a way to consistently map a key to a specific shard. There are multiple ways to do this and different databases adopt different strategies. The one Redis chose is called “Algorithmic sharding” and this is how it works:

    In order to find the shard on which a key lives we compute a numeric hash value out of the key name and modulo divide it by the total number of shards. Because we are using a deterministic hash function the key “foo” will always end up on the same shard, as long as the number of shards stays the same.

    But what happens if we want to increase our shard count even further, a process commonly called resharding? Let’s say we add one new shard so that our total number of shards is three. When a client tries to read the key “foo” now, they will run the hash function and modulo divide by the number of shards, as before, but this time the number of shards is different and we’re modulo dividing with three instead of two. Understandably, the result may be different, pointing us to the wrong shard!

    Resharding is a common issue with the algorithmic sharding strategy and can be solved by rehashing all the keys in the keyspace and moving them to the shard appropriate to the new shard count. This is not a trivial task, though, and it can require a lot of time and resources, during which the database will not be able to reach its full performance or might even become unavailable.

    Redis chose a very simple approach to solving this problem: it introduced a new, logical unit that sits between a key and a shard, called a hash slot.

One shard can contain many hash slots, and a hash slot contains many keys. The total number of hash slots in a database is always 16384 (16K). This time, the modulo division is not done with the number of shards, but with the number of hash slots, which stays the same even when resharding. The result gives us the position of the hash slot where the key we’re looking for lives. And when we do need to reshard, we simply move hash slots from one shard to another, distributing the data as required across the different Redis instances.

    Now that we know what sharding is and how it works in Redis, we can finally introduce Redis Cluster. Redis Cluster provides a way to run a Redis installation where data is automatically split across multiple Redis servers, or shards. Redis Cluster also provides high availability. So, if you're deploying Redis Cluster, you don't need (or use) Redis Sentinel.

    Redis Cluster can detect when a primary shard fails and promote a replica to a primary without any manual intervention from the outside. How does it do it? How does it know that a primary shard has failed, and how does it promote its replica to be the new primary shard? We need to have replication enabled. Say we have one replica for every primary shard. If all our data is divided between three Redis servers, we would need a six-member cluster, with three primary shards and three replicas.

All six shards are connected to each other over TCP and constantly PING each other, exchanging messages using a binary protocol. These messages contain information about which shards have responded with a PONG, and so are considered alive, and which haven’t.

    When enough shards report that a certain primary shard is not responding to them, they can agree to trigger a failover and promote the shard’s replica to become the new primary. How many shards need to agree that a shard is offline before a failover is triggered? Well, that’s configurable and you can set it up when you create a cluster, but there are some very important guidelines that you need to follow.

    If you have an even number of shards in the cluster, say six, and there’s a network partition that divides the cluster in two, you'll then have two groups of three shards. The group on the left side will not be able to talk to the shards from the group on the right side, so the cluster will think that they are offline and it will trigger a failover of any primary shards, resulting in a left side with all primary shards. On the right side, the three shards will see the shards on the left as offline, and will trigger a failover on any primary shard that was on the left side, resulting in a right side of all primary shards. Both sides, thinking they have all the primaries, will continue to receive client requests that modify data, and that is a problem, because maybe client A sets the key “foo” to “bar” on the left side, but a client B sets the same key’s value to “baz” on the right side.

When the network partition is removed and the shards try to rejoin, we will have a conflict, because two shards, holding different data, each claim to be the primary, and we wouldn’t know which data is valid.

    This is called a split brain situation, and is a very common issue in the world of distributed systems. A popular solution is to always keep an odd number of shards in your cluster, so that when you get a network split, the left and right group will do a count and see if they are in the bigger or the smaller group (also called majority or minority). If they are in the minority, they will not try to trigger a failover and will not accept any client write requests.

    Here's the bottom line: to prevent split-brain situations in Redis Cluster, always keep an odd number of primary shards and two replicas per primary shard.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/scalability/redis-cli-with-redis-cluster/index.html b/operate/redis-at-scale/scalability/redis-cli-with-redis-cluster/index.html index 0ce466cfcd..c06feb7eb1 100644 --- a/operate/redis-at-scale/scalability/redis-cli-with-redis-cluster/index.html +++ b/operate/redis-at-scale/scalability/redis-cli-with-redis-cluster/index.html @@ -4,7 +4,7 @@ 4.2 Using Redis-CLI with a Redis Cluster | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    4.2 Using Redis-CLI with a Redis Cluster

    When you use redis-cli to connect to a shard of a Redis Cluster, you are connected to that shard only, and cannot access data from other shards. If you try to access keys from the wrong shard, you will get a MOVED error.

    There is a trick you can use with redis-cli so you don’t have to open connections to all the shards, but instead you let it do the connect and reconnect work for you. It’s the redis-cli cluster support mode, triggered by the -c switch:

    $ redis-cli -p 7000 -c

    When in cluster mode, if the client gets an (error) MOVED 15495 127.0.0.1:7002 error response from the shard it’s connected to, it will simply reconnect to the address returned in the error response, in this case 127.0.0.1:7002.
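A session in cluster mode might look like this (the slot number and shard address will vary with your data and topology):

$ redis-cli -p 7000 -c
127.0.0.1:7000> GET foo
-> Redirected to slot [12182] located at 127.0.0.1:7002
"bar"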

    Now it’s your turn: use redis-cli cluster mode to connect to your cluster and try accessing keys in different shards. Observe the response messages.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/scalability/redis-cluster-and-client-libraries/index.html b/operate/redis-at-scale/scalability/redis-cluster-and-client-libraries/index.html index c4cb139c00..29fa35a2eb 100644 --- a/operate/redis-at-scale/scalability/redis-cluster-and-client-libraries/index.html +++ b/operate/redis-at-scale/scalability/redis-cluster-and-client-libraries/index.html @@ -4,7 +4,7 @@ 4.3 Redis Cluster and Client Libraries | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    4.3 Redis Cluster and Client Libraries

To use a client library with Redis Cluster, the library needs to be cluster-aware. Clients that support Redis Cluster typically feature a special connection module for managing connections to the cluster. The process that the better client libraries follow usually goes like this:

    The client connects to any shard in the cluster and gets the addresses of the rest of the shards. The client also fetches a mapping of hash slots to shards so it can know where to look for a key in a specific hash slot. This hash slot map is cached locally.


    Hash Slot Diagram

    When the client needs to read/write a key, it first runs the hashing function (crc16) on the key name and then modulo divides by 16384, which results in the key’s hash slot number.

    In the example below the hash slot number for the key “foo” is 12182. Then the client checks the hash slot number against the hash slot map to determine which shard it should connect to. In our example, the hash slot number 12182 lives on shard 127.0.0.1:7002.
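You can check this computation yourself with the CLUSTER KEYSLOT command, which returns the hash slot Redis computes for any key name:

127.0.0.1:7002> CLUSTER KEYSLOT foo
(integer) 12182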

    Finally, the client connects to the shard and finds the key it needs to work with.


    Hash Slot Map Diagram

    If the topology of the cluster changes for any reason and the key has been moved, the shard will respond with an (error) MOVED 15495 127.0.0.1:7006 error, returning the address of the new shard responsible for that key. This indicates to the client that it needs to re-query the cluster for its topology and hash slot allocation, so it will do that and update its local hash slot map for future queries.

    Not every client library has this extra logic built in, so when choosing a client library, make sure to look for ones with cluster support.

Another detail to check is whether the client stores the hash slot map locally. If it doesn’t, and instead relies on the (error) MOVED response to get the address of the right shard, you can expect much higher latency than usual, because the client may have to make two network requests instead of one for a large portion of its requests.

    Examples of clients that support Redis cluster:

    • Java: Jedis
    • .NET: StackExchange.Redis
    • Go: Radix, go-redis/redis
    • Node.js: node-redis, ioredis
    • Python: redis-py

    Here's a list of Redis Clients: https://redis.io/clients

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/talking-to-redis/client-performance-improvements/index.html b/operate/redis-at-scale/talking-to-redis/client-performance-improvements/index.html index fce96af1d2..f466ccf72f 100644 --- a/operate/redis-at-scale/talking-to-redis/client-performance-improvements/index.html +++ b/operate/redis-at-scale/talking-to-redis/client-performance-improvements/index.html @@ -4,7 +4,7 @@ 1.4 Client Performance Improvements | The Home of Redis Developers - + @@ -15,7 +15,7 @@ With connection pooling, the client library will instantiate a series of (persistent) connections to the Redis server and keep them open. When the application needs to send a request, the current thread will get one of these connections from the pool, use it, and return it when done.


    Connection Pool diagram
So, if possible, always choose a client library that supports connection pooling, because that decision alone can have a huge influence on your system’s performance.

    Pipelining

    As in any client-server application, Redis can handle many clients simultaneously. Each client does a (typically blocking) read on a socket and waits for the server response. The server reads the request from the socket, parses it, processes it, and writes the response to the socket. The time the data packets take to travel from the client to the server, and then back again, is called network round trip time, or RTT. If, for example, you needed to execute 50 commands, you would have to send a request and wait for the response 50 times, paying the RTT cost every single time. To tackle this problem, Redis can process new requests even if the client hasn't already read the old responses. This way, you can send multiple commands to the server without waiting for the replies at all; the replies are read in the end, in a single step.


    Pipelining diagram
    This technique is called pipelining and is another good way to improve the performance of your system. Most Redis libraries support this technique out of the box.
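One quick way to see pipelining in action is redis-cli's pipe mode, which sends a whole batch of commands before reading any replies (the commands here are just an illustration):

$ printf 'SET foo bar\r\nINCR counter\r\nGET foo\r\n' | redis-cli --pipe
All data transferred. Waiting for the last reply...
Last reply received from server.
errors: 0, replies: 3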

    Supplemental Reading:

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/talking-to-redis/command-line-tool/index.html b/operate/redis-at-scale/talking-to-redis/command-line-tool/index.html index f2718e5f33..f8023a8cff 100644 --- a/operate/redis-at-scale/talking-to-redis/command-line-tool/index.html +++ b/operate/redis-at-scale/talking-to-redis/command-line-tool/index.html @@ -4,7 +4,7 @@ 1.1 The Command Line Tool: Redis-CLI | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    1.1 The Command Line Tool: Redis-CLI



    Redis-cli is a command line tool used to interact with the Redis server. Most package managers include redis-cli as part of the redis package. It can also be compiled from source, and you'll find the source code in the Redis repository on GitHub.

There are two ways to use redis-cli:

    • an interactive mode where the user types commands and sees the replies;
    • a command mode where the command is provided as an argument to redis-cli, executed, and results sent to the standard output.

Let’s use the CLI to connect to a Redis server running at 172.22.0.3 and port 7000. The arguments -h and -p are used to specify the host and port to connect to. They can be omitted if your server is running on the default host (localhost) and default port (6379).
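For example (this assumes a Redis server is actually reachable at that address):

$ redis-cli -h 172.22.0.3 -p 7000
172.22.0.3:7000>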

    The redis-cli provides some useful productivity features. For example, you can scroll through your command history by pressing the up and down arrow keys. You can also use the TAB key to autocomplete a command, saving even more keystrokes. Just type the first few letters of a command and keep pressing TAB until the command you want appears on screen.

    Once you have the command name you want, the CLI will display syntax hints about the arguments so you don’t have to remember all of them, or open up the Redis command documentation.

    These three tips can save you a lot of time and take you a step closer to being a power user.

You can do much more with redis-cli, like sending output to a file, scanning for big keys, getting continuous stats, monitoring commands and so on. For a much more detailed explanation, refer to the documentation.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/talking-to-redis/configuring-a-redis-server/index.html b/operate/redis-at-scale/talking-to-redis/configuring-a-redis-server/index.html index 81980592b4..fd3282ca14 100644 --- a/operate/redis-at-scale/talking-to-redis/configuring-a-redis-server/index.html +++ b/operate/redis-at-scale/talking-to-redis/configuring-a-redis-server/index.html @@ -4,7 +4,7 @@ 1.2 Configuring a Redis Server | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    1.2 Configuring a Redis Server

The self-documented Redis configuration file, redis.conf, has often been cited as an example of well-written documentation. In this file you can find all possible Redis configuration directives, together with a detailed description of what they do and their default values.

When running Redis in production, you should always adjust the redis.conf file to your needs and instruct Redis to run based on its parameters.

    The way to do that is by providing the path to the file when starting up your server:

$ redis-server ./path/to/redis.conf

    When you’re only starting a Redis server instance for testing purposes you can pass configuration directives directly on the command line:

    $ redis-server --port 7000 --replicaof 127.0.0.1:6379

    The format of the arguments passed via the command line is exactly the same as the one used in the redis.conf file, with the exception that the keyword is prefixed with --.

    Note that internally this generates an in-memory temporary config file where arguments are translated into the format of redis.conf.

    It is possible to reconfigure a running Redis server without stopping or restarting it by using the special commands CONFIG SET and CONFIG GET.

    127.0.0.1:6379> CONFIG GET *

127.0.0.1:6379> CONFIG SET slowlog-max-len 256

    127.0.0.1:6379> CONFIG REWRITE

    Not all the configuration directives are supported in this way, but you can check the output of the command CONFIG GET * first for a list of all the supported ones.

    tip

    Modifying the configuration on the fly has no effect on the redis.conf file. At the next restart of Redis the old configuration will be used instead. If you want to force update the redis.conf file with your current configuration settings you can run the CONFIG REWRITE command, which will automatically scan your redis.conf file and update the fields which don't match the current configuration value.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/talking-to-redis/index.html b/operate/redis-at-scale/talking-to-redis/index.html index b8644ceb68..ec1157f5e2 100644 --- a/operate/redis-at-scale/talking-to-redis/index.html +++ b/operate/redis-at-scale/talking-to-redis/index.html @@ -4,7 +4,7 @@ Talking to Redis | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Talking to Redis


    Profile picture for Justin Castilla
    Author:
    Justin Castilla, Senior Developer Advocate at Redis

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/talking-to-redis/initial-tuning/index.html b/operate/redis-at-scale/talking-to-redis/initial-tuning/index.html index e4d95ead88..99f7d90e74 100644 --- a/operate/redis-at-scale/talking-to-redis/initial-tuning/index.html +++ b/operate/redis-at-scale/talking-to-redis/initial-tuning/index.html @@ -4,7 +4,7 @@ 1.5 Initial Tuning | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    1.5 Initial Tuning

    We love Redis because it’s fast (and fun!), so as we begin to consider scaling out Redis, we first want to make sure we've done everything we can to maximize its performance.

    Let's start by looking at some important tuning parameters.

    Max Clients

Redis has a default maximum of 10,000 clients; after that maximum has been reached, Redis will respond to all new connections with an error. If you have a lot of connections (or a lot of application instances), then you may need to go higher. You can set the maximum number of simultaneous clients in the Redis config file:

    maxclients 20000

    Max Memory

By default, Redis has no max memory limit, so it will use all available system memory. If you are using replication, you will want to limit the memory usage in order to leave overhead for replica output buffers. It’s also a good idea to leave memory for the system itself: something like 25% overhead. You can update this setting in the Redis config file:

    # memory size in bytes
    maxmemory 1288490188

    Set TCP-BACKLOG

    The Redis server uses the value of tcp-backlog to specify the size of the complete connection queue.

    Redis passes this configuration as the second parameter of the listen(int s, int backlog) call.

If you have many connections, you will need to set this higher than the default of 511. You can update this in the Redis config file:

    # TCP listen() backlog.
    #
    # In high requests-per-second environments you need an high backlog in order
    # to avoid slow clients connections issues. Note that the Linux kernel
    # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
    # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
    # in order to get the desired effect.
    tcp-backlog 65536

    As the comment in redis.conf indicates, the value of somaxconn and tcp_max_syn_backlog may need to be increased at the OS level as well.

    Set Read Replica Configurations

One simple way to scale Redis is to add read replicas and take load off of the primary. This is most effective when you have a read-heavy (as opposed to write-heavy) workload. You will probably want the replica to remain available and keep serving stale data, even if its replication link with the primary is down or not yet complete. You can update this in the Redis config:

    slave-serve-stale-data yes

You will also want to prevent any writes from happening on the replicas. You can update this in the Redis config:

slave-read-only yes

Note that since Redis 5 these directives also go by the names replica-serve-stale-data and replica-read-only; the slave-* forms are kept as aliases for backward compatibility.

    Kernel Memory

    Under high load, occasional performance dips can occur due to memory allocation. This is something Salvatore, the creator of Redis, blogged about in the past. The performance issue is related to transparent hugepages, which you can disable at the OS level if needed.

    $ echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled

    Kernel Network Stack

    If you plan on handling a large number of connections in a high performance environment, we recommend tuning the following kernel parameters:

vm.swappiness=0                         # turn off swapping
net.ipv4.tcp_sack=1                     # enable selective acknowledgements
net.ipv4.tcp_timestamps=1               # needed for selective acknowledgements
net.ipv4.tcp_window_scaling=1           # scale the network window
net.ipv4.tcp_congestion_control=cubic   # better congestion algorithm
net.ipv4.tcp_syncookies=1               # enable syn cookies
net.ipv4.tcp_tw_recycle=1               # recycle sockets quickly
net.ipv4.tcp_max_syn_backlog=NUMBER     # backlog setting
net.core.somaxconn=NUMBER               # up the number of connections per port
net.core.rmem_max=NUMBER                # up the receive buffer size
net.core.wmem_max=NUMBER                # up the buffer size for all connections

(One caveat: net.ipv4.tcp_tw_recycle was problematic behind NAT and was removed in Linux kernel 4.12, so skip it on modern kernels.)

    File Descriptor Limits

If you do not set the correct number of file descriptors for the Redis user, you will see errors indicating that Redis "can't set maximum open files". You can increase the file descriptor limit at the OS level.

    Here's an example on Ubuntu using systemd:

    /etc/systemd/system/redis.service
    [Service]
    ...
    User=redis
    Group=redis
    ...
    LimitNOFILE=65536
    ...

    You will then need to reload the daemon and restart the redis service.
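With systemd, assuming the service is named redis as in the unit file above, that would be:

$ sudo systemctl daemon-reload
$ sudo systemctl restart redis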

    Enabling RPS (Receive Packet Steering) and CPU preferences

    One way we can improve performance is to prevent Redis from running on the same CPUs as those handling any network traffic. This can be accomplished by enabling RPS for our network interfaces and creating some CPU affinity for our Redis process.

Here is an example. First we can enable RPS on CPUs 0-1. The value written is a hexadecimal CPU bitmask, so 3 (binary 11) selects CPUs 0 and 1:

    $ echo '3' > /sys/class/net/eth1/queues/rx-0/rps_cpus

    Then we can set the CPU affinity for redis to CPUs 2-8:

    # config is set to write pid to /var/run/redis.pid
    $ taskset -pc 2-8 `cat /var/run/redis.pid`
    pid 8946's current affinity list: 0-8
    pid 8946's new affinity list: 2-8
    - + \ No newline at end of file diff --git a/operate/redis-at-scale/talking-to-redis/redis-clients/index.html b/operate/redis-at-scale/talking-to-redis/redis-clients/index.html index a1587c7b18..e2271f458d 100644 --- a/operate/redis-at-scale/talking-to-redis/redis-clients/index.html +++ b/operate/redis-at-scale/talking-to-redis/redis-clients/index.html @@ -4,7 +4,7 @@ 1.3 Redis Clients | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    1.3 Redis Clients



    Redis has a client-server architecture and uses a request-response model. Applications send requests to the Redis server, which processes them and returns responses for each. The role of a Redis client library is to act as an intermediary between your application and the Redis server.

    Client libraries perform the following duties:

    • Implement the Redis wire protocol - the format used to send requests to and receive responses from the Redis server
    • Provide an idiomatic API for using Redis commands from a particular programming language

    Managing the connection to Redis

    Redis clients communicate with the Redis server over TCP, using a protocol called RESP (REdis Serialization Protocol) designed specifically for Redis.

    The RESP protocol is simple and text-based, so it is easily read by humans, as well as machines. A common request/response would look something like this. Note that we're using netcat here to send raw protocol:
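As a sketch, here is SET hello world encoded in RESP and sent with netcat; depending on your netcat variant, you may need a flag such as -q 1 so it waits for the reply:

$ printf '*3\r\n$3\r\nSET\r\n$5\r\nhello\r\n$5\r\nworld\r\n' | nc localhost 6379
+OK

The *3 marks an array of three elements, each $N prefix gives the byte length of the string that follows, and the server's +OK reply is a RESP simple string.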

    This simple, well documented protocol has resulted in Redis clients for almost every language you can think of. The redis.io client page lists over 200 client libraries for more than 50 programming languages.

    - + \ No newline at end of file diff --git a/operate/redis-at-scale/talking-to-redis/redis-server-overview/index.html b/operate/redis-at-scale/talking-to-redis/redis-server-overview/index.html index 52a420ed60..472ddc86fe 100644 --- a/operate/redis-at-scale/talking-to-redis/redis-server-overview/index.html +++ b/operate/redis-at-scale/talking-to-redis/redis-server-overview/index.html @@ -4,7 +4,7 @@ 1.0 Redis Server Overview | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    1.0 Redis Server Overview



    As you might already know, Redis is an open source data structure server written in C. You can store multiple data types, like strings, hashes, and streams and access them by a unique key name.

    For example, if you have a string value “Hello World” saved under the key name “greeting”, you can access it by running the GET command followed by the key name - greeting. All keys in a Redis database are stored in a flat keyspace. There is no enforced schema or naming policy, and the responsibility for organizing the keyspace is left to the developer.
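In redis-cli, that example looks like this:

127.0.0.1:6379> SET greeting "Hello World"
OK
127.0.0.1:6379> GET greeting
"Hello World"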

The speed Redis is famous for is mostly due to the fact that Redis stores and serves data entirely from RAM, instead of from disk as most other databases do. Another contributing factor is its predominantly single-threaded nature: single-threading avoids race conditions and the CPU-heavy context switching associated with threads.

    Indeed, this means that open source Redis can’t take advantage of the processing power of multiple CPU cores, although CPU is rarely the bottleneck with Redis. You are more likely to bump up against memory or network limitations before hitting any CPU limitations. That said, Redis Enterprise does let you take advantage of all of the cores on a single machine.

Let’s now look at exactly what happens behind the scenes with every Redis request. When a client sends a request to a Redis server, the request is first read from the socket, then parsed and processed, and finally the response is written back to the socket and sent to the user. Reading from, and especially writing to, a socket are expensive operations, so in Redis version 6.0 multi-threaded I/O was introduced. When this feature is enabled, Redis can delegate the time spent reading and writing to I/O sockets to other threads, freeing up cycles for storing and retrieving data and boosting overall performance by up to a factor of two for some workloads.

    Throughout the rest of the section, you’ll learn how to use the Redis command-line interface, how to configure your Redis server, and how to choose and tune your Redis client library.

    - + \ No newline at end of file diff --git a/redis-insiders/index.html b/redis-insiders/index.html index 8a8c4166a6..3759ce1e8a 100644 --- a/redis-insiders/index.html +++ b/redis-insiders/index.html @@ -4,7 +4,7 @@ Redis Insiders, our ambassador program | The Home of Redis Developers - + @@ -12,7 +12,7 @@

    Redis Insiders, our ambassador program


    Profile picture for Suze Shardlow
    Author:
    Suze Shardlow, Developer Community Manager at Redis

    Redis Insiders is our first community ambassador program, launched in May 2022.

    Our Redis Insiders are enthusiastic community members who love using Redis, and helping others to use it. They spread the word through technical talks, blog posts, live streams and more!

    Meet our Redis Insiders

    Our current Insiders are...

    Jyotsna Gupta

    Jyotsna Gupta headshot

    Jyotsna is based in Bangalore, India and works as a Senior Software Engineer at Gojek (GoPay). She is an open source enthusiast and has been using Redis for the past three years, with Golang and Java. She applied to become a Redis Insider because she is passionate about communities and loves to share and learn together. When she’s not working or volunteering, she energises herself by playing badminton, table tennis, basketball, Carrom and chess.

    Find out more about Jyotsna.

    Moiz Kapasi

    Moiz Kapasi headshot

    Moiz Kapasi is a Paris, France-based Solution Architect at Capgemini who builds apps in the enterprise landscape of a major European car manufacturer, using Java / J2EE. Moiz was drawn to the Redis Insiders program because, since he started using Redis 1.5 years ago, the simplicity and power of Redis has fascinated him. His hobbies include cycling along the River Seine, camping, reading classic literature and philately.

    Find out more about Moiz.

    Michael Owolabi

    Michael Owolabi headshot

    Michael is a Senior Software Engineer at Spleet, from Lagos, Nigeria. He is a JavaScript programmer and has been using Redis for more than two years. In his spare time, Michael enjoys travelling, adventure, writing, and volunteering. Michael applied to become a Redis Insider because as a lover of Redis himself, he wanted an opportunity to meet with and learn from other Redis professionals around the globe and also share his knowledge of Redis through speaking and writing.

    Find out more about Michael.

    Stevan Thomas

    Stevan Thomas headshot

    Stevan is a Senior Software Engineer at Vela, with five years of Redis experience. He builds web, mobile and desktop apps for a variety of industries including shipping / logistics, finance, retail and health using JavaScript, Swift, Java, C# and Python. Stevan lives in Port of Spain, Trinidad and Tobago and, in his spare time, enjoys hiking, fitness / CrossFit, watching movies and learning new technologies. He is excited to join the Redis Insiders program because he wants to be a positive influence in the adoption of Redis as a primary database for developers.

    Find out more about Stevan.


    Jyotsna Gupta, Redis Insider


Author: Suze Shardlow, Developer Community Manager at Redis
Author: Jyotsna Gupta, Redis Insider

    Jyotsna Gupta headshot

    "I am an Open Source Enthusiast, working as a Senior Software Engineer at Gojek (GoPay). I am based out of Bangalore, India. I have been using Redis for the last 3 years.

    "I applied to become a Redis Insider because I am highly passionate about communities and love to share and learn together, and I am currently trying to get the expertise in Redis at my current workplace.

    "Apart from my work and volunteering, I energize myself by playing badminton, table tennis, Carrom, chess and basketball. I prefer to spend most of the time in my room, else I feel to travel the world. If you don’t see me doing any of the above, then you’ll find probably find me sleeping and dreaming for hours, maybe days?"

    Find Jyotsna online at:


    Michael Owolabi, Redis Insider


Author: Suze Shardlow, Developer Community Manager at Redis
Author: Michael Owolabi, Redis Insider

    Michael Owolabi headshot

    "I am a Senior Software Engineer at Spleet and have been using Redis for 2+ years. I build software products using Node.js."

    "I am based in Lagos, Nigeria, and, in my spare time, I enjoy traveling, adventure, writing and volunteering.

    "I applied to become a Redis Insider because as a lover of Redis myself, I wanted an opportunity to meet with and learn from other Redis professionals around the globe and also share my knowledge of Redis through speaking and writing."

    Find Michael online at:


    Moiz Kapasi, Redis Insider


Author: Suze Shardlow, Developer Community Manager at Redis
Author: Moiz Kapasi, Redis Insider

    Moiz Kapasi headshot

    "I am a Solution Architect at Capgemini and have been using Redis for 1.5 years. I build/manage/enhance various applications in the enterprise landscape of a major car manufacturer in Europe chain using Java/J2EE stack.

    "I am based in Paris, France. In my spare time, I enjoy cycling along the River Seine, camping, reading classic literature and philately.

    "I applied to become a Redis Insider because the simplicity and the power of Redis fascinates me. We can do so much with this incredible software. I dug deep in Redis only in the last 15 months and I feel I am still scratching the surface. So many more things to do, so many applications to be made better, faster and more reliable by using the power of Redis."

    Find Moiz online at:


    Stevan Thomas, Redis Insider


Author: Suze Shardlow, Developer Community Manager at Redis
Author: Stevan Thomas, Redis Insider

    Stevan Thomas headshot

    "I am a Senior Software Engineer at Vela and have been using Redis for 5 years. I build web, mobile and desktop apps for a variety of industries including shipping / logistics, financial, retail and Hhalth using JavaScript, TypeScript, Node.js, React, Swift, Java, C# and Python.

    "I am based in Port of Spain, Trinidad and Tobago and, in my spare time, I enjoy hiking, fitness/CrossFit, watching movies and learning new technology.

    "I applied to become a Redis Insider because I want to be a positive influence in the adoption of Redis as a primary database for developers and business owners."


    Redis Live

    The Redis Developer Relations Team hosts a variety of live content on Twitch, YouTube and Discord. Follow us to get notified when we go live. And, you know, like and subscribe!

    Upcoming Events

Date | Time | Streamers | Show

    Past Events

Date | Streamers | Show
Thursday, Sept 21 | Simon Prickett | Introducing Polygon Search in Redis Stack 7.2
Thursday, July 20 | Simon Prickett | Simon's IoT Show: Counting Aircraft!
Tuesday, July 4 | Simon Prickett | Simon's IoT Show: Searching and Indexing Image Data with Redis Stack and Python - Episode 4
Thursday, June 1 | Simon Prickett | Simon's IoT Show: Searching and Indexing Image Data with Redis Stack and Python - Episode 3
Friday, May 19 | Simon Prickett | Simon's IoT Show: Searching and Indexing Image Data with Redis Stack
Thursday, April 27 | Simon Prickett | Simon's IoT Show: Storing Photos in Redis with the Raspberry Pi Camera and Python
Friday, March 31 | Savannah Norem | savannah_streams_in_snake_case A Game! Episode 7
Friday, March 24 | Savannah Norem | savannah_streams_in_snake_case A Game! Episode 6
Thursday, March 23 | Simon Prickett | Simon's IoT Show: Plane Spotting with Redis - Episode 6
Tuesday, March 14 | Simon Prickett | Simon's IoT Show: Plane Spotting with Redis - Episode 5
Friday, March 10 | Savannah Norem | savannah_streams_in_snake_case A Game! Episode 5
Friday, March 3 | Savannah Norem | savannah_streams_in_snake_case A Game! Episode 4
Thursday, March 2 | Simon Prickett | Simon's Redis IoT Show: Plane Spotting with Redis and Node.js Episode 4
Friday, February 24 | Simon Prickett | Simon's Redis IoT Show: Plane Spotting with Redis and Node.js Episode 3
Tuesday, February 21 | Savannah Norem | savannah_streams_in_snake_case A Game! Ep 3
Monday, February 20 | Simon Prickett | Simon's Redis IoT Show: Plane Spotting with Redis and Node.js Episode 2
Friday, February 17 | Savannah Norem | savannah_streams_in_snake_case A Game! Ep 2
Thursday, February 16 | Simon Prickett | Simon's Things on Thursdays: Plane Spotting with Redis and Node.js Episode 1
Tuesday, February 7 | Savannah Norem | savannah_streams_in_snake_case A Game! Ep 1
Thursday, February 2 | Simon Prickett | Simon's Things on Thursdays: More Redis Streams! (Episode 2)
Thursday, January 26 | Simon Prickett | Simon's Things on Thursdays: More Redis Streams! (Episode 1)
Tuesday, January 24 | Savannah Norem | savannah_streams_in_snake_case
Monday, January 23 | Justin Castilla | Do Birds Dream in JSON? - Episode 7
Friday, January 20 | Savannah Norem, Justin Castilla | This Week On Discord
Wednesday, January 18 | Justin Castilla | Do Birds Dream in JSON? (Episode 6 - Integrating Fast API)
Tuesday, January 17 | Savannah Norem | savannah_streams_in_snake_case
Friday, January 13 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, January 12 | Suze Shardlow and Simon Prickett | Weekly Redis University Office Hours
Thursday, January 12 | Simon Prickett | Simon's Things on Thursdays: Introduction to Redis Stack for IoT Projects
Friday, January 6 | Suze Shardlow, Simon Prickett and Justin Castilla | This Week On Discord

2022 Events

Friday, December 16 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, December 15 | Suze Shardlow and Simon Prickett | Redis University Office Hours
Thursday, December 15 | Simon Prickett | Simon's Things on Thursdays: Node-RED Episode 2
Monday, December 12 | Justin Castilla | Do Birds Dream in JSON? - Episode 5
Friday, December 9 | Savannah Norem, Justin Castilla | RU204: Live Day 5
Friday, December 9 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, December 8 | Savannah Norem, Justin Castilla | RU204: Live Day 4
Thursday, December 8 | Suze Shardlow and Simon Prickett | Redis University Office Hours
Thursday, December 8 | Simon Prickett | Simon's Things on Thursdays: Node-RED Episode 1
Wednesday, December 7 | Savannah Norem, Justin Castilla | RU204: Live Day 3
Tuesday, December 6 | Savannah Norem, Justin Castilla | RU204: Live Day 2
Monday, December 5 | Justin Castilla | Do Birds Dream in JSON? - Episode 4
Monday, December 5 | Savannah Norem, Justin Castilla | RU204: Live Day 1
Friday, December 2 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, December 1 | Suze Shardlow, Simon Prickett and Justin Castilla | Redis University Office Hours
Thursday, December 1 | Simon Prickett | Simon's Things on Thursdays: Wifi Setup with Raspberry Pi Pico W
Friday, November 25 | Suze Shardlow and Simon Prickett | This Week On Discord
Thursday, November 24 | Suze Shardlow and Simon Prickett | Redis University Office Hours
Thursday, November 24 | Simon Prickett | Simon's Things on Thursdays: Synchronized Counting with Keyspace Notifications - Episode 2
Tuesday, November 22 | Savannah Norem | savannah_streams_in_snake_case
Monday, November 21 | Justin Castilla | Do Birds Dream in JSON? - Episode 3
Friday, November 18 | Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, November 17 | Justin Castilla | Redis University Office Hours
Thursday, November 17 | Simon Prickett | Simon's Things on Thursdays: Synchronized Counting with Keyspace Notifications - Episode 1
Monday, November 14 | Justin Castilla | Do Birds Dream in JSON? - Episode 2
Thursday, November 10 | Savannah Norem and Justin Castilla | This Week On Discord
Thursday, November 10 | Savannah Norem and Justin Castilla | Redis University Office Hours
Friday, November 4 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, November 3 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | Redis University Office Hours
Thursday, November 3 | Simon Prickett | Simon's Things on Thursdays - Cheerlights with MQTT and Redis Streams
Monday, October 31 | Justin Castilla | Do Birds Dream in JSON? - Episode 1
Friday, October 28 | Suze Shardlow, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, October 27 | Suze Shardlow and Simon Prickett | Weekly Redis University Office Hours
Thursday, October 27 | Simon Prickett | Simon's Things on Thursdays - Raspberry Pi Pico W - Episode 5
Wednesday, October 26 | Steve Lorello | Coding with Steve: Redis OM .NET - Episode 10
Tuesday, October 25 | Savannah Norem | savannah_streams_in_snake_case
Friday, October 21 | Suze Shardlow, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, October 20 | Suze Shardlow and Simon Prickett | Weekly Redis University Office Hours
Thursday, October 20 | Simon Prickett | Simon's Things on Thursdays - Raspberry Pi Pico W - Episode 4
Wednesday, October 19 | Steve Lorello | Coding with Steve: Redis OM .NET - Episode 9
Tuesday, October 18 | Savannah Norem | savannah_streams_in_snake_case
Friday, October 14 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, October 13 | Savannah Norem and Justin Castilla | Weekly Redis University Office Hours
Wednesday, October 12 | Steve Lorello | Coding with Steve: Redis OM .NET - Episode 8
Friday, October 7 | Guy Royse | Building Redis MUD - Web, Whimsy, and Redis Stack: Episode 5
Friday, October 7 | Justin Castilla, Suze Shardlow and Simon Prickett | This Week On Discord
Thursday, October 6 | Suze Shardlow and Simon Prickett | Weekly Redis University Office Hours
Thursday, October 6 | Simon Prickett | Simon's Things on Thursdays - Raspberry Pi Pico W - Episode 3
Tuesday, October 4 | Savannah Norem | savannah_streams_in_snake_case - Stream a Little Stream - Episode 5
Friday, September 30 | Savannah Norem and Justin Castilla | This Week On Discord
Thursday, September 29 | Savannah Norem and Justin Castilla | Weekly Redis University Office Hours
Friday, September 23 | Guy Royse | Building Redis MUD - Web, Whimsy, and Redis Stack: Episode 4
Friday, September 23 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, September 22 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | Weekly Redis University Office Hours
Thursday, September 22 | Simon Prickett | Simon's Things on Thursdays - Raspberry Pi Pico W - Episode 2
Tuesday, September 20 | Savannah Norem | savannah_streams_in_snake_case - Stream a Little Stream - Episode 4
Friday, September 16 | Savannah Norem and Justin Castilla | This Week On Discord
Thursday, September 15 | Savannah Norem and Justin Castilla | Weekly Redis University Office Hours
Friday, September 9 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | This Week On Discord
Thursday, September 8 | Suze Shardlow, Savannah Norem, Simon Prickett and Justin Castilla | Weekly Redis University Office Hours
Thursday, September 8 | Simon Prickett | Simon's Things on Thursdays - Raspberry Pi Pico W - Episode 1
Wednesday, September 7 | Steve Lorello | Coding with Steve: Redis OM .NET - Episode 7
Tuesday, September 6 | Savannah Norem | savannah_streams_in_snake_case: Stream a Little Stream - Episode 3
Friday, September 2 | Guy Royse | Building Redis MUD - Web, Whimsy, and Redis Stack: Episode 3
Friday, September 2 | Suze Shardlow, Savannah Norem and Simon Prickett | This Week On Discord
Thursday, September 1 | Simon Prickett | Simon's Things on Thursdays - Hardware Bloom Filter
Wednesday, August 31 | Steve Lorello | Coding with Steve: Redis OM .NET - Episode 6
Tuesday, August 30 | Savannah Norem | savannah_streams_in_snake_case: Stream a Little Stream - Episode 2
Friday, August 26 | Guy Royse | Building Redis MUD - Web, Whimsy, and Redis Stack: Episode 2
Wednesday, August 24 | Steve Lorello | Coding with Steve: Redis OM .NET - Episode 5
Tuesday, August 23 | Savannah Norem | savannah_streams_in_snake_case: Stream a Little Stream - Episode 1
Thursday, August 18 | Simon Prickett | Simon's Things on Thursdays
Tuesday, August 16 | Savannah Norem | savannah_streams_in_snake_case
Friday, August 12 | Guy Royse | Building Redis MUD - Web, Whimsy, and Redis Stack: Episode 1
Wednesday, August 10 | Steve Lorello | Coding with Steve
Tuesday, August 9 | Savannah Norem | savannah_streams_in_snake_case
Friday, August 5 | Simon Prickett | IoT with Redis: Introduction
Wednesday, August 3 | Steve Lorello | Steve Works on Redis OM .NET
Tuesday, August 2 | Savannah Norem | savannah_streams_in_snake_case
Friday, July 29 | Savannah Norem, Simon Prickett | First Steps to Open Source Contribution
Thursday, July 28 | Simon Prickett | Up and Running with RU203: Querying, Indexing and Full-Text Search
Wednesday, July 27 | Steve Lorello | Steve Works on cli.redis.io
Tuesday, July 26 | Savannah Norem | savannah_streams_in_snake_case - Probabilistic Data Structures
Wednesday, July 20 | Steve Lorello | Steve Works on cli.redis.io
Friday, July 15 | Guy Royse | Exploring Bun and Node Redis
Thursday, July 14 | Simon Prickett | Introduction to Redis Streams with RedisInsight, Node and Python
Wednesday, July 13 | Steve Lorello | Steve Works on Redis OM .NET
Friday, July 8 | Guy Royse | Graph, Graph, and Graph
Thursday, July 7 | Simon Prickett | Up and Running with the RU202 Redis Streams Course
Wednesday, July 6 | Steve Lorello | Steve Works on Redis OM .NET
Thursday, June 30 | Savannah Norem, Guy Royse | Comparing Sets, Bloom Filters, and Cuckoo Filters
Wednesday, June 29 | Steve Lorello | Steve Works on Redis OM .NET!
Tuesday, June 28 | Simon Prickett | Up and Running with the RU102J Redis University Course
Friday, June 24 | Guy Royse | Graph, Graph, and Graph
Thursday, June 23 | Justin Castilla, Savannah Norem | Redis OM: Python + JSON + Search
Thursday, June 16 | Simon Prickett, Justin Castilla | Counting Things At Scale with Redis
Wednesday, June 15 | Simon Prickett | Up and Running with the RU102JS Redis University Course
Wednesday, June 15 | Simon Prickett | Up and Running with the RU102PY Redis University Course
Friday, June 10 | Guy Royse | Working on Redis OM for Node.js
Friday, June 10 | Justin Castilla, Suze Shardlow | Working with Redis Data Structures
Friday, June 3 | Guy Royse | Tracking Aircraft with Redis + Software-Defined Radio

    RIOT

    Redis Input/Output Tools (RIOT) is a set of import/export command line utilities for Redis:

• RIOT Redis: live replication from any Redis database (including AWS ElastiCache) to another Redis database.
    • RIOT DB: migrate from an RDBMS to Redis, Search, JSON, ...

Most database migration tools available today are offline in nature. Migrating data from AWS ElastiCache to Redis Enterprise Cloud, for example, means backing up your ElastiCache data to an AWS S3 bucket and importing it into Redis Enterprise Cloud using its UI. This implies some downtime and might result in data loss. Other available techniques include creating point-in-time snapshots of the source Redis server and applying the changes to the destination servers to keep both servers in sync. It might sound like a good approach, but it can be challenging when you have to maintain dozens of scripts to implement the migration strategy.

    RIOT Redis is a migration tool that allows for seamless live replication between two Redis databases.

    1. Getting Started

    Download the latest release and unzip the archive.

    Launch the bin/riot-redis script and follow the usage information provided.

    2. Build and Run

    git clone https://github.com/redis-developer/riot.git
    cd riot/riot-redis
    ./riot-redis

    3. Install via Homebrew (macOS)

brew install jruaux/tap/riot-redis

    Usage

    ❯ riot-redis
    Usage: {app} [OPTIONS] [COMMAND]
    --help Show this help message and exit.
    -V, --version Print version information and exit.
    -q, --quiet Log errors only
    -d, --debug Log in debug mode (includes normal stacktrace)
    -i, --info Set log level to info

    You can use --help on any subcommand:

    ❯ riot-redis --help

    ❯ riot-redis import --help

    ❯ riot-redis import .. hset --help

    Redis connection options are the same as redis-cli:

-h, --hostname=<host>     Server hostname (default: 127.0.0.1)
    -p, --port=<port> Server port (default: 6379)
    -s, --socket=<socket> Server socket (overrides hostname and port)
    --user=<username> Used to send ACL style 'AUTH username pass'. Needs password.
    -a, --pass[=<password>] Password to use when connecting to the server
    -u, --uri=<uri> Server URI
    -o, --timeout=<sec> Redis command timeout (default: 60)
    -n, --db=<int> Database number (default: 0)
    -c, --cluster Enable cluster mode
    -t, --tls Establish a secure TLS connection
    -l, --latency Show latency metrics
    -m, --pool=<int> Max pool connections (default: 8)

    Redis URI syntax is described here.

    4. Example

Here is an example of live replication from a source Redis database (host source, port 6379) to a target Redis database (host target, port 6380):

    ❯ riot-redis -h source -p 6379 replicate --idle-timeout 500 -h target -p 6380 --live

    5. Verification

Once replication is complete, RIOT Redis performs a verification step that compares values and TTLs between the source and target databases. The output looks like this:

    OK:1000 V:0 >:0 <:0 T:0
• OK: number of identical values
• V: number of mismatched values
• >: number of keys only present in the source database
• <: number of keys only present in the target database
• T: number of keys with a TTL difference greater than the tolerance

    6. Architecture

    RIOT Redis implements client-side replication using a producer/consumer approach:

• the producer is connected to the source Redis (e.g. ElastiCache) and iterates over keys to read their corresponding values
• the consumer is connected to the target Redis (e.g. Redis Enterprise Cloud) and writes the key/value tuples previously created

    1. Key reader: initiates a SCAN and optionally calls SUBSCRIBE to listen for keyspace notifications (live replication).
    2. Value reader: takes the keys and calls DUMP and TTL.
3. Key/Value writer: takes key/value/TTL tuples and calls RESTORE and EXPIRE. (A minimal sketch of this pipeline follows below.)
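To make the three steps concrete, here is a minimal Python sketch of the snapshot phase using the redis-py client. It is an illustration, not RIOT's actual implementation (which is Java-based and processes keys in batches); the host names are assumptions matching the replication example above, and the EXPIRE step is folded into RESTORE's TTL argument:

import redis

src = redis.Redis(host='source', port=6379)  # producer side, e.g. ElastiCache
dst = redis.Redis(host='target', port=6380)  # consumer side, e.g. Redis Enterprise Cloud

# Key reader: SCAN the source keyspace
for key in src.scan_iter(count=100):
    # Value reader: DUMP the serialized value and read the TTL
    value = src.dump(key)
    if value is None:  # key expired between SCAN and DUMP
        continue
    ttl_ms = src.pttl(key)  # remaining TTL in milliseconds, -1 if none
    # Key/Value writer: RESTORE on the target (a TTL of 0 means no expiry)
    dst.restore(key, ttl_ms if ttl_ms > 0 else 0, value, replace=True)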
Note: Live replication makes use of keyspace notifications. Make sure the source Redis database has keyspace notifications enabled, either by setting notify-keyspace-events = KA in redis.conf or via CONFIG SET.
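For example, to enable notifications from the command line (the source host name is an assumption carried over from the replication example above):

❯ redis-cli -h source CONFIG SET notify-keyspace-events KA
OK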

Note: The live replication mechanism does not guarantee data consistency. Redis sends keyspace notifications over pub/sub, which does not provide guaranteed delivery, so it is possible for RIOT Redis to miss some notifications, for example in the case of network failures.
