diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..be7914c --- /dev/null +++ b/404.html @@ -0,0 +1,762 @@ + + + + + + + + + + + + + + + + + + + Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/CNAME b/CNAME new file mode 100644 index 0000000..483d8bf --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +cloudbootcamp.dev \ No newline at end of file diff --git a/agenda/index.html b/agenda/index.html new file mode 100644 index 0000000..16b5f5e --- /dev/null +++ b/agenda/index.html @@ -0,0 +1,1324 @@ + + + + + + + + + + + + + + + + + + + + + + + Course Agenda - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Agenda

+

The following table lists the topics and coding activities for the week. Click on the name of the topic to open a pdf of the material. Click on the link to the solution code to view the solution.

+
+
+
+

Day 1

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TopicType of Activity
KickoffActivity
IntroductionsActivity
Introduction Cloud NativePresentation
ContainersPresentation
Container ActivitiesActivity
LunchActivity
Container Activities (Cont.)Activity
KubernetesPresentation
Wrap up
+

Day 2

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TopicType of Activity
Recap and review from Monday; Q&APresentation
Kubernetes ActivitiesActivity
LunchActivity
KubernetesPresentation
Wrap up
+

Day 3

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TopicType of Activity
Recap and review from Tuesday; Q&APresentation
Kubernetes ActivitiesActivity
Continuous IntegrationPresentation
LunchActivity
Continuous Integration LabActivity
Continuous DeploymentPresentation
Wrap up
+

Day 4

+ + + + + + + + + + + + + + + + + + + + + + + + + +
TopicType of Activity
Recap and review from Wednesday; Q&APresentation
Continuous Deployment LabActivity
Lunch
Project WorkActivity
+

Day 5

+ + + + + + + + + + + + + + + + + + + + + +
TopicType of Activity
Recap and review from Thursday ; Q&APresentation
Project WorkActivity
RetrospectiveActivity
+
+
+

Modules

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TopicType of ActivityDuration
ContainersPresentation1 Hour
Container ActivitiesActivity30 mins
KubernetesPresentation6 Hours
Kubernetes ActivitiesActivity4 Hours
Continuous IntegrationPresentation1 Hour
Continuous Integration LabActivity1 Hour
Continuous DeploymentPresentation1 Hour
Continuous Deployment LabActivity1 Hour
Project WorkActivity2 Hours
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 0000000..1cf13b9 Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.fe8b6f2b.min.js b/assets/javascripts/bundle.fe8b6f2b.min.js new file mode 100644 index 0000000..cf778d4 --- /dev/null +++ b/assets/javascripts/bundle.fe8b6f2b.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function 
s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function 
J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*! + * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var M=f()(_);return u("cut"),M},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",M=document.createElement("textarea");M.style.fontSize="12pt",M.style.border="0",M.style.padding="0",M.style.margin="0",M.style.position="absolute",M.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return M.style.top="".concat(j,"px"),M.setAttribute("readonly",""),M.value=V,M}var te=function(_,M){var 
j=A(_);M.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var M=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,M):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,M):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(M){return typeof M}:H=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},M=_.action,j=M===void 0?"copy":M,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(M){return typeof M}:Ie=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var M=0;M<_.length;M++){var j=_[M];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,M){return _&&ro(V.prototype,_),M&&ro(V,M),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(M){return M.__proto__||Object.getPrototypeOf(M)},Wt(V)}function vr(V,_){var M="data-clipboard-".concat(V);if(_.hasAttribute(M))return _.getAttribute(M)}var 
Ri=function(V){Ci(M,V);var _=Hi(M);function M(j,D){var Y;return _i(this,M),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(M,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),M}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof 
s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else 
if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not 
defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 
0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var 
r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return 
e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var O=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return O}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return y(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return 
ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),G(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=x("script",{src:e});return C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function 
ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),v(r=>Go.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),v(t=>en.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return 
S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(v(t=>t?O:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),b(t=>t.length>0),G(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():O))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function 
ln(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function B(e){return St.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!B("announce.dismiss")||!e.childElementCount)return O;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return 
t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new 
URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:O),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let 
p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return 
i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > 
li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?O:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):O})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:O)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return 
S(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node 
.divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs 
#statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead 
path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let 
h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),Na(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > 
code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>B("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(v(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return O;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return 
i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(b(()=>B("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new 
g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?O:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let 
s=0;sa.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return O;let r=e.target.closest("a");if(r===null)return O;if(r.target||e.metaKey||e.ctrlKey)return O;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):O}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return 
t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),O}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return O;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),v(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),O)))),v(Xn),v(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 
0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>O)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?O:(i.preventDefault(),I(p))}}return O}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p 
of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),G(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),v(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?O:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function 
is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] 
[href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>O),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>O),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>O),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return O}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return O}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>O),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of 
n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else 
p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title"))):O})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(v(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(v(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function 
wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),G(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;B("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof 
t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Qn(e,{viewport$:Oe,header$:rt})),G(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>B("search.highlight")?mi(e,{index$:Mi,location$:jt}):O),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(v(()=>hs),Pe(ds),G(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.fe8b6f2b.min.js.map + diff --git a/assets/javascripts/bundle.fe8b6f2b.min.js.map b/assets/javascripts/bundle.fe8b6f2b.min.js.map new file mode 100644 index 0000000..8263585 --- /dev/null +++ b/assets/javascripts/bundle.fe8b6f2b.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", 
"node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", 
"node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", 
"node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", 
"node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", 
"src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", 
"src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", 
"src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * 
`focus-visible` class being added, i.e. whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n 
document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. 
For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. 
So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = 
factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box 
model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. 
https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. 
Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if 
(self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} 
useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || 
exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener 
to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from 
https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) 
{\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = 
index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n 
mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n 
/* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? 
value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? 
y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? 
(this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? 
f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. 
Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. 
A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. 
Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? []).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? 
[_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. 
It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. 
This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. 
Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { 
timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. 
Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { 
reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. 
Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | 
null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". 
Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): 
UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. 
This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. 
See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). 
In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. 
Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * 
}\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. 
To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: 
OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? 
Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. 
Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. 
This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. (DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? 
function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. 
Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected 
_checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. 
When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. `ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. 
This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 
1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. 
May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is 
not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? 
this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... 
*/\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. 
Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the 
Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? 
super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. 
Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. 
cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? 
of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! 
: defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an + +

Application Requirements

+

Runtime and Isolation

+

Your applications must be isolated from the operating system. You should be able to run them anywhere. This allows you to run multiple applications on the same server and also allows you to control their dependencies and resources.

+

One way to achieve this is containerization. Among the different container options, Docker is popular. A container is nothing but a way to package your application and run it in an isolated environment. While developing your applications, also make sure all the dependencies are declared in your application before packaging it.

+

Resource Allocation and Scheduling

+

Your applications must include dynamic scheduling. This helps you to figure out where the application must run, and these decisions are made automatically for you by the scheduler. The scheduler collects information about the resources of the different systems and chooses the right place to run the application. An operator can override the decisions of the scheduler if desired.

+

Environment isolation

+

You need proper environment isolation to differentiate dev, test, stage, production, etc. based on your requirements. Without completely duplicating your cluster, the infrastructure should be able to separate the dependencies across different application environments.

+

These environments should include all of the resources like databases, network resources etc. needed by the application. Cloud native infrastructure can create environments with very low overhead.

+

Service discovery

+

In your application, there may be multiple services. These services may depend on one another. How will they find each other if one service needs to communicate with another? For this, the infrastructure should provide a way for services to find each other.

+

This may be in different ways. It can be using API calls or using DNS or with network proxies. There should be a service discovery mechanism in place and how you do this does not matter.

+

Usually, cloud-native applications make use of their infrastructure for service discovery to identify dependent services. Examples include cloud metadata services, DNS, etcd, and Consul.

+

State Management

+

While defining your cloud native application, you should provide a mechanism to check the status of the application. This can be done by an API or hook that checks the current state of the application, such as whether it is submitted, scheduled, ready, healthy, unhealthy, terminating, etc.

+

We usually have such capabilities in any of the orchestration platforms we use. For example, if you consider Kubernetes, you can do this using events, probes, and hooks. When the application is submitted, scheduled, or scaled, an event is triggered. Readiness probes check whether the application is ready, and liveness probes check whether the application is healthy. Hooks are used for events that need to happen before or after processes start.

+

Monitoring and logging

+

Monitoring and logging should be a part of the cloud-native application. Dynamically monitoring all the services of the application is important. It keeps checking the entire application and is used for debugging purposes when required. Also, make sure your logging system is able to collect all the logs and consolidate them based on application, environment, tags, etc.

+

Metrics

+

Cloud-native applications must include metrics as a part of their code. All the telemetry data needed will be provided by the metrics. This helps you to know whether your application is meeting the service-level objectives.

+

Metrics are collected at instance level and later aggregated together to provide the complete view of the application. Once the application provides metrics, underlying infrastructure will scrape them out and use them for analysis.

+

Debugging and tracing

+

When an application is deployed and a problem occurs, we refer to the logging system. But if that does not resolve the issue, we need distributed tracing. Distributed tracing helps us to understand what is happening in the application. It allows us to debug problems by providing a visualization interface, which is different from the details we get from logging. Also, it provides shorter feedback loops, which help you to debug distributed systems easily.

+

Application tracing is always important, so make sure it is a part of your cloud-native application. If you cannot include it in the application, you can also enable it at the infrastructure level using proxies or traffic analysis.

+

Conclusion

+

We discussed the cloud-native application design, implementations of cloud-native patterns, and the application life cycle. We also saw how we can design our cloud-native applications using the twelve-factor methodology. Along with this, we also explored what we need to include in our cloud-native application while building it.

+

References

+
    +
  • https://learning.oreilly.com/library/view/managing-cloud-native/9781492037071/[Justin Garrison, Kris Nova, (2018). Managing cloud native applications. Publisher: O'Reilly Media, Inc.]
  • +
  • https://learning.oreilly.com/library/view/cloud-native-architectures/9781787280540/[Piyum Zonooz, Erik Farr, Kamal Arora, Tom Laszewski, (2018). Cloud Native Architectures. Publisher: Packt Publishing]
  • +
  • https://12factor.net/codebase[12factor.net]
  • +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cloud-native/images/CNCF_TrailMap_latest.png b/cloud-native/images/CNCF_TrailMap_latest.png new file mode 100644 index 0000000..1da3e1f Binary files /dev/null and b/cloud-native/images/CNCF_TrailMap_latest.png differ diff --git a/cloud-native/index.html b/cloud-native/index.html new file mode 100644 index 0000000..e6017ed --- /dev/null +++ b/cloud-native/index.html @@ -0,0 +1,1346 @@ + + + + + + + + + + + + + + + + + + + + + + + Cloud-Native - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+ +
+ + + +
+
+ + + + + + + +

Cloud-Native

+

Introduction

+

Cloud is everywhere. Today, many companies want to migrate their +applications onto the cloud. For this migration to happen, the +applications must be re-architected in a way that they fully utilize the +advantages of the cloud.

+ + +

Presentations

+

Cloud-Native Presentation

+

What is Cloud-Native?

+

Cloud-native is about how we build and run applications taking full +advantage of cloud computing rather than worrying about where we deploy +it.

+

Cloud-native refers less to where an application resides and more to how +it is built and deployed.

+
    +
  • +

    A cloud-native application consists of discrete, reusable components + known as microservices that are designed to integrate into any cloud + environment.

    +
  • +
  • +

    These microservices act as building blocks and are often packaged in + containers.

    +
  • +
  • +

    Microservices work together as a whole to comprise an application, + yet each can be independently scaled, continuously improved, and + quickly iterated through automation and orchestration processes.

    +
  • +
  • +

    The flexibility of each microservice adds to the agility and + continuous improvement of cloud-native applications.

    +
  • +
+

CNCF Cloud Native Definition

+

Cloud native technologies empower organizations to build and run +scalable applications in modern, dynamic environments such as public, +private, and hybrid clouds. Containers, service meshes, microservices, +immutable infrastructure, and declarative APIs exemplify this approach.

+

These techniques enable loosely coupled systems that are resilient, +manageable, and observable. Combined with robust automation, they allow +engineers to make high-impact changes frequently and predictably with +minimal toil.

+

The Cloud Native Computing Foundation seeks to drive adoption of this +paradigm by fostering and sustaining an ecosystem of open source, +vendor-neutral projects. We democratize state-of-the-art patterns to +make these innovations accessible for everyone.

+

Why Cloud-Native?

+

Cloud-native applications are different from the traditional +applications that run in your data centres. The applications that are +designed in the traditional way are not built keeping cloud +compatibility in mind. They may have strong ties with the internal +systems. Also, they cannot take advantage of all the benefits of the +cloud.

+

So, we need a new architecture for our applications to utilize the +benefits of cloud. There is a need to design the applications keeping +cloud in mind and take advantage of several cloud services like storage, +queuing, caching etc.

+
    +
  • +

    Speed, safety, and scalability comes with cloud-native applications.

    +
  • +
  • +

    Helps you to quickly deliver the advancements.

    +
  • +
  • +

    Allows you to have loose ties into the corporate IT where it most + certainly would destabilize legacy architectures.

    +
  • +
  • +

    Helps you to continuously deliver your applications with zero + downtime.

    +
  • +
  • +

    Infrastructure is less predictable.

    +
  • +
  • +

    Service instances are all disposable.

    +
  • +
  • +

    Deployments are immutable.

    +
  • +
  • +

    To meet the expectations of the today’s world customers, these + systems are architected for elastic scalability.

    +
  • +
+

Cloud-native concepts

+

Some of the important characteristics of cloud-native applications are +as follows.

+
    +
  • +

    Disposable Infrastructure

    +
  • +
  • +

    Isolation

    +
  • +
  • +

    Scalability

    +
  • +
  • +

    Disposable architecture

    +
  • +
  • +

    Value added cloud services

    +
  • +
  • +

    Polyglot cloud

    +
  • +
  • +

    Self-sufficient, full-stack teams

    +
  • +
  • +

    Cultural Change

    +
  • +
+

Disposable Infrastructure

+

While creating applications on the cloud, you need several cloud resources +as part of them. We often hear how easy it is to create all these +resources. But did you ever think about how easy it is to dispose of them? It is +definitely not that easy to dispose of them, and that is why you don’t hear +a lot about it.

+

In traditional or legacy applications, we have all these resources +residing on machines. If these go down, we need to redo them again and +most of this is handled by the operations team manually. So, when we are +creating applications on cloud, we bring those resources like load +balancers, databases, gateways, etc on to cloud as well along with +machine images and containers.

+

While creating these applications, you should always keep in mind that +if you are creating a resource when required, you should also be able to +destroy it when not required. Without this, we cannot achieve the +factors speed, safety and scalability. If you want this to happen, we +need automation.

+

Automation allows you to

+
    +
  • +

    Deliver new features at any time.

    +
  • +
  • +

    Deliver patches faster.

    +
  • +
  • +

    Improves the system quality.

    +
  • +
  • +

    Facilitates team scale and efficiency.

    +
  • +
+

Now you know what we are talking about. Disposable infrastructure is +nothing but Infrastructure as Code.

+

Infrastructure as Code

+

Here, you develop the code for automation exactly the same way as you do +for the rest of the application, using agile methodologies.

+
    +
  • +

    Automation code is driven by a story.

    +
  • +
  • +

    Versioned in the same repository as rest of the code.

    +
  • +
  • +

    Continuously tested as part of CI/CD pipeline.

    +
  • +
  • +

    Test environments are created and destroyed along with test runs.

    +
  • +
+

Thus, disposable infrastructure lays the ground work for scalability and +elasticity.

+

Isolation

+

In traditional or legacy applications, the applications are monoliths. +So, when there is a bug or error in the application, you need to fix it. +Once you change the code, the entire application must be redeployed. +Also, there may be side effects which you can never predict. New changes +may break any component in the application, as they are all +interrelated.

+

In cloud-native applications, to avoid the above scenario, the system is +decomposed into bounded isolated components. Each service will be +defined as one component and they are all independent of each other. So, +in this case, when there is a bug or error in the application, you know +which component to fix and this also avoids any side effects as the +components are all unrelated pieces of code.

+

Thus, cloud-native systems must be resilient to man-made errors. To +achieve this we need isolation, which prevents a problem in one +component from affecting the entire system. Also, it helps you to introduce +changes quickly in the application with confidence.

+

Scalability

+

Simply deploying your application on the cloud does not make it +cloud-native. To be cloud-native, it should be able to take full advantage +of the cloud. One of the key features is scalability.

+

In today’s world, once your business starts growing, the number of users +keep increasing and they may be from different locations. Your +application should be able to support more number of devices and it +should also be able to maintain its responsiveness. Moreover, this +should be efficient and cost-effective.

+

To achieve this, cloud native application runs in multiple runtimes +spread across multiple hosts. The applications should be designed and +architected in a way that they support multi regional, active-active +deployments. This helps you to increase the availability and avoids +single point of failures.

+

Disposable architecture

+

Leveraging the disposable infrastructure and scaling isolated components +is important for cloud native applications. Disposable architecture is +based on this and it takes the idea of disposability and replacement to +the next level.

+

Most of us think in a monolithic way because we got used to traditional +or legacy applications a lot. This may lead us to take decisions in +monolithic way rather than in cloud native way. In monoliths, we tend to +be safe and don’t do a lot of experimentation. But Disposable +architecture is exactly opposite to monolithic thinking. In this +approach, we develop small pieces of the component and keep +experimenting with it to find an optimal solution.

+

When there is a breakthrough in the application, you can’t simply take +decisions based on the available information which may be incomplete or +inaccurate. So, with disposable architecture, you start with small +increments, and invest time to find the optimal solution. Sometimes, +there may be a need to completely replace the component, but that +initial work was just the cost of getting the information that caused +the breakthrough. This helps you to minimize waste allowing you to use +your resources on controlled experiments efficiently and get good value +out of it in the end.

+

Value added cloud services

+

When you are defining an application, there are many things you need to +care of. Each and every service will be associated with many things like +databases, storage, redundancy, monitoring, etc. For your application, +along with your components, you also need to scale the data. You can +reduce the operational risk and also get all such things at greater +velocity by leveraging the value-added services that are available on +cloud. Sometimes, you may need third party services if they are not +available on your cloud. You can externally hook them up with your +application as needed.

+

By using the value added services provided by your cloud provider, you +will get to know all the available options on your cloud and you can +also learn about all the new services. This will help you to take good +long-termed decisions. You can definitely exit the service if you find +something more suitable for your component and hook that up with your +application based on the requirements.

+

Polyglot cloud

+

Most of you are familiar with Polyglot programming. For your +application, based on the component, you can choose the programming +languages that best suits it. You need not stick to a single programming +language for the entire application. If you consider Polyglot +persistence, the idea is choose the storage mechanism that suits better +on a component by component basis. It allows a better global scale.

+

Similarly, the next thing will be Polyglot cloud. Like above, here you +choose a cloud provider that better suits on a component by component +basis. For majority of your components, you may have a go to cloud +provider. But, this does not stop you from choosing a different one if +it suits well for any of your application components. So, you can run +different components of your cloud native system on different cloud +providers based on your requirements.

+

Self-sufficient, full-stack teams

+

In a traditional setup, many organizations have teams based on skill +set, like backend, user interface, database, operations, etc. Such a +structure will not allow you to build cloud-native systems.

+

In cloud-native systems, the system is composed of bounded, isolated +components. They have their own resources. Each such component must +be owned by a self-sufficient, full-stack team. That team is entirely +responsible for all the resources that belong to that particular +component. In this setup, the team tends to build quality in up front, as +they are the ones who deploy it and they will be taking care of it if +the component is broken. It is more like you build it and then you run +it. So, the team can continuously deliver advancements to the components +at their own pace. Also, they are completely responsible for delivering +them safely.

+

Cultural Change

+

Cloud native is different way of thinking. We need to first make up our +minds, not just the systems, to utilize the full benefits of cloud. +Compared to the traditional systems, there will be lots of things we do +differently in cloud-native systems.

+

To make that happen, cultural change is really important. To change the +thinking at a high level, we first need to prove that the low-level +practices can truly deliver and encourage lean thinking. With this +practice, you can conduct experimentation. Based on the feedback from +the business, you can quickly and safely deliver applications that can +scale.

+

Cloud-native Roadmap

+

You can define your cloud native road map in many ways. You can get +there by choosing different paths. Let us see the trail map defined by +CNCF.

+

CNCF defined the Cloud Native Trail Map providing an overview for +enterprises starting their cloud native journey as follows.

+

This cloud map gives us various steps that an engineering team may use +while considering the cloud native technologies and exploring them. The +most common ones among them are Containerization, CI/CD, and +Orchestration. Next crucial pieces will be Observability & Analysis and +Service Mesh. And later comes the rest of them like Networking, +Distributed Database, Messaging, Container runtime, and software +distribution based on your requirements.

+

CNCF_TrailMap_latest.png

+
    +
  • +

    Without containerization, you cannot build cloud-native + applications. This helps your application to run in any computing + environment. Basically, all your code and dependencies are packaged + up together into a single unit here. Among the different container + platforms available, Docker is a preferred one.

    +
  • +
  • +

    To bring all the changes in the code to container automatically, it + is nice to set up a CI/CD pipeline which does that. There are many + tools available like jenkins, travis, etc.

    +
  • +
  • +

    Since we have containers, we need container orchestration to manage + the container lifecycles. Currently, Kubernetes is one solution + which is popular.

    +
  • +
  • +

    Monitoring and observability play a very important role. It is good + to set up capabilities such as logging, tracing, metrics, etc.

    +
  • +
  • +

    To enable more complex operational requirements, you can use a + service mesh. It helps you out with several things like service + discovery, health, routing, A/B testing etc. Istio is one of the + examples of service mesh.

    +
  • +
  • +

    Networking plays a crucial role. You should define flexible + networking layers based on your requirements. For this, you can use + Calico, Weave Net etc.

    +
  • +
  • +

    Sometimes, you may need distributed databases. Based on your + requirements, if you need more scalability and resiliency, these are + required.

    +
  • +
  • +

    Messaging may be required sometimes too. Go with different messaging + queues like Kafka, RabbitMQ etc available when you need them.

    +
  • +
  • +

    Container Registry helps you to store all your containers. You can + also enable image scanning and signing if required.

    +
  • +
  • +

    As a part of your application, sometimes you may need a secure + software distribution.

    +
  • +
+

Also, if you want to see the cloud native landscape, check it out +here.

+

Summary

+

In this, we covered the fundamentals of cloud-native systems. You now +know what cloud native is, why we need it, and why it is important. Cloud +native is not just deploying your application on the cloud; it is more about +taking full advantage of the cloud. Also, from the cloud-native roadmap, you +will get an idea of how to design and architect your cloud-native +system. You can also get an idea of different tools, frameworks, +platforms, etc. from the cloud-native landscape.

+

Also, if you are interested in knowing more, we have Cloud-Native: A +Complete Guide. Feel free +to check it out.

+

References

+ + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cloud-native/materials/01-What-Is-Cloud-Native.pdf b/cloud-native/materials/01-What-Is-Cloud-Native.pdf new file mode 100644 index 0000000..389d4b0 Binary files /dev/null and b/cloud-native/materials/01-What-Is-Cloud-Native.pdf differ diff --git a/cloud-native/materials/01-What-Is-Cloud-Native.pptx.zip b/cloud-native/materials/01-What-Is-Cloud-Native.pptx.zip new file mode 100644 index 0000000..acd34a2 Binary files /dev/null and b/cloud-native/materials/01-What-Is-Cloud-Native.pptx.zip differ diff --git a/cloudnative-challenge/index.html b/cloudnative-challenge/index.html new file mode 100644 index 0000000..13d4130 --- /dev/null +++ b/cloudnative-challenge/index.html @@ -0,0 +1,1464 @@ + + + + + + + + + + + + + + + + + + + + + + + Cloud Native Challenge - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Cloud Native Challenge

+

Phase 1 - Local Develop

+
    +
  • Start by creating a Github Repo for your application.
  • +
  • Choose NodeJS, Python, or React.
  • +
  • Site about one of the following:
      +
    • Yourself
    • +
    • Hobby
    • +
    • Place you live
    • +
    +
  • +
  • Must be able to run locally
  • +
+

Application Requirements

+
    +
  • Minimum of 3 webpages
  • +
  • Minimum of 1 GET and POST method each.
  • +
  • SwaggerUI Configured for API Testing.
  • +
  • API's exposed through Swagger
  • +
  • Custom CSS files for added formatting.
  • +
+

Testing

+

Setup each of the following tests that apply:

+
    +
  • Page tests
  • +
  • API tests
  • +
  • Connection Tests
  • +
+

Phase 2 - Application Enhancements

+

Database Connectivity and Functionality

+
    +
  • Add local or cloud DB to use for data collection.
  • +
  • Use 3rd party API calls to get data.
      +
    • Post Data to DB via API Call
    • +
    • Retrieve Data from DB via API Call
    • +
    • Delete Data from DB via API Call
    • +
    +
  • +
+

Phase 3 - Containerize

+

Container Image

+
    +
  • Create a DockerFile
  • +
  • Build your docker image from the dockerfile
  • +
  • Run it locally via Docker Desktop or another docker engine.
  • +
+

Image Registries

+
    +
  • Once you have validated a working Docker image, push the image up to a registry.
  • +
  • Use one of the following registries:
      +
    • Docker
    • +
    • Quay.io
    • +
    • IBM Container
    • +
    +
  • +
  • Push the image up with the following name: {DockerRegistry}/{yourusername}/techdemos-cn:v1
  • +
+

Phase 4 - Kubernetes Ready

+

Create Pod and Deployment files

+
    +
  • Create a Pod YAML to validate your image.
  • +
  • Next, create a deployment yaml file with the setting of 3 replicas.
  • +
  • Verify starting of deployment
  • +
  • Push all YAML files to Github
  • +
+

Application Exposing

+
    +
  • Create a Service and Route yaml
  • +
  • Save Service and Route yamls in Github
  • +
+

Configuration Setup

+
    +
  • Create a ConfigMap for all site configuration.
  • +
  • Setup Secrets for API keys or Passwords to 3rd parties.
  • +
  • Add storage where needed to deployment.
  • +
+

Phase 5 - Devops/Gitops

+

Tekton Pipeline Setup

+
    +
  • Create a Tekton pipeline to do the following:
      +
    • Setup
    • +
    • Test
    • +
    • Build and Push Image
    • +
    • GitOps Version Update
    • +
    +
  • +
  • Make each of the above their own task.
  • +
  • Setup triggers to respond to Github commits and PR's
  • +
+

GitOps Configuration

+
    +
  • Use ArgoCD to setup Deployment.
  • +
  • Test your ArgoCD deployment
  • +
  • Make a change to site and push them.
  • +
  • Validate new image version.
  • +
+

Extras

+

Chatbot Functions

+
    +
  • Watson Assistant Integration
  • +
  • Conversation about your sites topic.
  • +
  • Have Chat window or page.
  • +
  • Integrate Watson Assistant Actions.
  • +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/containers/imageregistry/index.html b/containers/imageregistry/index.html new file mode 100644 index 0000000..79b3592 --- /dev/null +++ b/containers/imageregistry/index.html @@ -0,0 +1,1077 @@ + + + + + + + + + + + + + + + + + + + + + + + Image Registries - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Image Registries

+

A registry is a repository used to store and access container images. Container registries can support container-based application development, often as part of DevOps processes.

+

Container registries save developers valuable time in the creation and delivery of cloud-native applications, acting as the intermediary for sharing container images between systems. They essentially act as a place for developers to store container images and share them out via a process of uploading (pushing) to the registry and downloading (pulling) into another system, like a Kubernetes cluster.

+

Learn More

+
+
+
+

Make sure you have Docker Desktop installed and up and running.

+
Login to Quay
docker login quay.io
+Username: your_username
+Password: your_password
+Email: your_email
+
+

First we'll create a container with a single new file based off of the busybox base image: +

Create a new container
docker run busybox echo "fun" > newfile
+
+ The container will immediately terminate, so we'll use the command below to list it: +
docker ps -l
+
+ The next step is to commit the container to an image and then tag that image with a relevant name so it can be saved to a repository.

+

Replace "container_id" with your container id from the previous command. +

Create a new image
docker commit container_id quay.io/your_username/repository_name
+
+ Be sure to replace "your_username" with your quay.io username and "repository_name" with a unique name for your repository.

+

Now that we've tagged our image with a repository name, we can push the repository to Quay Container Registry: +

Push the image to Quay
docker push quay.io/your_username/repository_name
+
+ Your repository has now been pushed to Quay Container Registry!

+

To view your repository, click on the button below:

+

Repositories

+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/containers/index.html b/containers/index.html new file mode 100644 index 0000000..095e1da --- /dev/null +++ b/containers/index.html @@ -0,0 +1,1231 @@ + + + + + + + + + + + + + + + + + + + + + + + Containers Introduction - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + + + +

Containers Introduction

+

You wanted to run your application on different computing environments. It may be your laptop, test environment, staging environment or production environment.

+

So, when you run it on these different environments, will your application work reliably?

+

What if some underlying software changes? What if the security policies are different? Or something else changes?

+

To solve these problems, we need Containers.

+

Containers

+

Containers are a standard way to package an application and all its dependencies so that it can be moved between environments and run without change. They work by hiding the differences between applications inside the container so that everything outside the container can be standardized.

+

For example, Docker created a standard way to create images for Linux Containers.

+ + +

Presentations

+

Container Basics

+

Why containers?

+
    +
  • We can run them anywhere.
  • +
  • They are lightweight.
  • +
  • Isolate your application from others.
  • +
+ + +

Different Container Standards

+

There are many different container standards available today. Some of them are as follows.

+

Docker - The most common standard, made Linux containers usable by the masses.

+

Rocket (rkt) - An emerging container standard from CoreOS, the company that developed etcd.

+

Garden - The format Cloud Foundry builds using buildpacks.

+

Among them, Docker was one of the most popular mainstream container software tools.

+

Open Container Initiative (OCI)

+

A Linux Foundation project developing a governed container standard. Docker and Rocket are OCI-compliant. But, Garden is not.

+

Benefits

+
    +
  • Lightweight
  • +
  • Scalable
  • +
  • Efficient
  • +
  • Portable
  • +
  • Supports agile development
  • +
+

To know more about Containerization, we have couple of guides. Feel free to check them out.

+ +

Docker

+

Docker is one of the most popular Containerization platforms which allows you to develop, deploy, and run application inside containers.

+
    +
  • It is an open source project.
  • +
  • Can run it anywhere.
  • +
+ + +

An installation of Docker includes an engine. This comes with a daemon, REST APIs, and CLI. Users can use CLI to interact with the docker using commands. These commands are sent to the daemon which listens for the Docker Rest APIs which in turn manages images and containers. The engine runs a container by retrieving its image from the local system or registry. A running container starts one or more processes in the Linux kernel.

+

Docker Image

+

A read-only snapshot of a container that is stored in Docker Hub or in private repository. You use an image as a template for building containers.

+

These images are built from a Dockerfile.

+

Dockerfile

+
    +
  • It is a text document that contains all the instructions that are necessary to build a docker image.
  • +
  • It is written in an easy-to-understand syntax.
  • +
  • It specifies the operating system.
  • +
  • It also includes things like environmental variables, ports, file locations etc.
  • +
+

If you want to try building docker images, try this course on O'Reilly (Interactive Learning Platform).

+ +

Docker Container

+

The standard unit where the application service is located or transported. It packages up all code and its dependencies so that the application runs quickly and reliably from one computing environment to another.

+

If you want to try deploying a docker container, try this course on O'Reilly (Interactive Learning Platform).

+

Docker Engine

+

Docker Engine is a program that creates, ships, and runs application containers. The engine runs on any physical or virtual machine or server locally, in private or public cloud. The client communicates with the engine to run commands.

+

If you want to learn more about docker engines, try this course on O'Reilly

+

Docker Registry

+

The registry stores, distributes, and shares container images. It is available as software as a service (SaaS) or in an enterprise, to deploy anywhere that you choose.

+

Docker Hub is a popular registry. It is a registry which allows you to download docker images which are built by different communities. You can also store your own images there. You can check out various images available on docker hub here.

+ + +

References

+ + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/containers/materials/02-Containers-Basics.pdf b/containers/materials/02-Containers-Basics.pdf new file mode 100644 index 0000000..b7a9631 Binary files /dev/null and b/containers/materials/02-Containers-Basics.pdf differ diff --git a/containers/materials/02-Containers-Basics.pptx.zip b/containers/materials/02-Containers-Basics.pptx.zip new file mode 100644 index 0000000..b115f14 Binary files /dev/null and b/containers/materials/02-Containers-Basics.pptx.zip differ diff --git a/containers/reference/index.html b/containers/reference/index.html new file mode 100644 index 0000000..b4b00ef --- /dev/null +++ b/containers/reference/index.html @@ -0,0 +1,1294 @@ + + + + + + + + + + + + + + + + + + + + + + + References - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Containers

+

Containers are a standard way to package an application and all its dependencies so that it can be moved between environments and run without change. They work by hiding the differences between applications inside the container so that everything outside the container can be standardized.

+

For example, Docker created standard way to create images for Linux Containers.

+

Basic Docker Commands

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ActionCommand
Get Docker versiondocker version
Run hello-world Containerdocker run hello-world
List Running Containersdocker ps
Stop a containerdocker stop <container-name/container-id>
List Docker Imagesdocker images
Login into registrydocker login
Build an imagedocker build -t <image_name>:<tag> .
Inspect a docker objectdocker inspect <name/id>
Inspect a docker imagedocker inspect image <name/id>
Pull an imagedocker pull <image_name>:<tag>
Push an Imagedocker push <image_name>:<tag>
Remove a containerdocker rm <container-name/container-id>
+

Running Docker

+
+
+
+
    +
  1. +

    Install Docker Desktop

    +
  2. +
  3. +

    Test it out

    +
  4. +
+
+
+
    +
  1. +

    Install ibmcloud CLI +

    curl -fsSL https://clis.cloud.ibm.com/install/osx | sh
    +

    +
  2. +
  3. +

    Verify installation +

    ibmcloud help
    +

    +
  4. +
  5. +

    Configure environment. Go to cloud.ibm.com -> click on your profile -> Log into CLI and API and copy IBM Cloud CLI command. It will look something like this: +

    ibmcloud login -a https://cloud.ibm.com -u passcode -p <password>
    +

    +
  6. +
  7. +

    Log into docker through IBM Cloud +

    ibmcloud cr login --client docker
    +

    +
  8. +
+
+
+
+ +

Activities

+

| Task | Description | Link | Time | +| ----------------------- | --------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------- | ------ | +| IBM Container Registry | Build and Deploy Run using IBM Container Registry | IBM Container Registry | 30 min | +| Docker Lab | Running a Sample Application on Docker | Docker Lab | 30 min |

+

Once you have completed these tasks, you should have a base understanding of containers and how to use Docker.

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/devops/argocd/index.html b/devops/argocd/index.html new file mode 100644 index 0000000..0855216 --- /dev/null +++ b/devops/argocd/index.html @@ -0,0 +1,1155 @@ + + + + + + + + + + + + + + + + + + + + + + + Continuous Deployment - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Continuous Deployment

+

Continuous Integration, Delivery, and Deployment are important DevOps practices, and we often hear a lot about them. These processes are valuable and ensure that the software stays up to date in a timely manner.

+
    +
  • Continuous Integration is an automation process which allows developers to integrate their work into a repository. When a developer pushes their work into the source code repository, it ensures that the software continues to work properly. It helps to enable collaborative development across the teams and also helps to identify integration bugs sooner.
  • +
  • Continuous Delivery comes after Continuous Integration. It prepares the code for release. It automates the steps that are needed to deploy a build.
  • +
  • Continuous Deployment is the final step which succeeds Continuous Delivery. It automatically deploys the code whenever a code change is done. Entire process of deployment is automated.
  • +
+

What is GitOps?

+

GitOps in short is a set of practices to use Git pull requests to manage infrastructure and application configurations. Git repository in GitOps is considered the only source of truth and contains the entire state of the system so that the trail of changes to the system state are visible and auditable.

+
    +
  • Traceability of changes in GitOps is no novelty in itself as this approach is almost universally employed for the application source code. However GitOps advocates applying the same principles (reviews, pull requests, tagging, etc) to infrastructure and application +configuration so that teams can benefit from the same assurance as they do for the application source code.
  • +
  • Although there is no precise definition or agreed upon set of rules, the following principles are an approximation of what constitutes a GitOps practice:
  • +
  • Declarative description of the system is stored in Git (configs, monitoring, etc)
  • +
  • Changes to the state are made via pull requests
  • +
  • Git push reconciled with the state of the running system with the state in the Git repository
  • +
+

ArgoCD Overview

+

Presentations

+

GitOps Overview

+

Activities

+

These activities give you a chance to walkthrough building CD pipelines using ArgoCD.

+

These tasks assume that you have: + - Reviewed the Continuous Deployment concept page.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLinkTime
Walkthroughs
GitOpsIntroduction to GitOps with OpenShiftLearn OpenShift GitOps20 min
Try It Yourself
ArgoCD LabLearn how to setup ArgoCD and Deploy ApplicationArgoCD30 min
+

Once you have completed these tasks, you will have created an ArgoCD deployment and have an understanding of Continuous Deployment.

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/devops/ibm-toolchain/images/Add_Tool_DP.png b/devops/ibm-toolchain/images/Add_Tool_DP.png new file mode 100644 index 0000000..182c360 Binary files /dev/null and b/devops/ibm-toolchain/images/Add_Tool_DP.png differ diff --git a/devops/ibm-toolchain/images/Add_Tool_Git.png b/devops/ibm-toolchain/images/Add_Tool_Git.png new file mode 100644 index 0000000..edb4559 Binary files /dev/null and b/devops/ibm-toolchain/images/Add_Tool_Git.png differ diff --git a/devops/ibm-toolchain/images/Blank_Template.png b/devops/ibm-toolchain/images/Blank_Template.png new file mode 100644 index 0000000..114a57a Binary files /dev/null and b/devops/ibm-toolchain/images/Blank_Template.png differ diff --git a/devops/ibm-toolchain/images/Continuous_Delivery.png b/devops/ibm-toolchain/images/Continuous_Delivery.png new file mode 100644 index 0000000..a204d4b Binary files /dev/null and b/devops/ibm-toolchain/images/Continuous_Delivery.png differ diff --git a/devops/ibm-toolchain/images/Pipeline_Dashboard.png b/devops/ibm-toolchain/images/Pipeline_Dashboard.png new file mode 100644 index 0000000..c08e451 Binary files /dev/null and b/devops/ibm-toolchain/images/Pipeline_Dashboard.png differ diff --git a/devops/ibm-toolchain/images/Pipeline_Details.png b/devops/ibm-toolchain/images/Pipeline_Details.png new file mode 100644 index 0000000..2e6771d Binary files /dev/null and b/devops/ibm-toolchain/images/Pipeline_Details.png differ diff --git a/devops/ibm-toolchain/images/Region_Select.png b/devops/ibm-toolchain/images/Region_Select.png new file mode 100644 index 0000000..51de3c5 Binary files /dev/null and b/devops/ibm-toolchain/images/Region_Select.png differ diff --git a/devops/ibm-toolchain/images/Tekton_App.png b/devops/ibm-toolchain/images/Tekton_App.png new file mode 100644 index 0000000..dd30488 Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_App.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Commit.png 
b/devops/ibm-toolchain/images/Tekton_Commit.png new file mode 100644 index 0000000..3e21d69 Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Commit.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Deployment_Success.png b/devops/ibm-toolchain/images/Tekton_Deployment_Success.png new file mode 100644 index 0000000..19407a0 Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Deployment_Success.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Environment.png b/devops/ibm-toolchain/images/Tekton_Environment.png new file mode 100644 index 0000000..615c12a Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Environment.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Files.png b/devops/ibm-toolchain/images/Tekton_Files.png new file mode 100644 index 0000000..af29c18 Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Files.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Git_Setup.png b/devops/ibm-toolchain/images/Tekton_Git_Setup.png new file mode 100644 index 0000000..861ae00 Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Git_Setup.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Manual_Trigger.png b/devops/ibm-toolchain/images/Tekton_Manual_Trigger.png new file mode 100644 index 0000000..ac40c63 Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Manual_Trigger.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Redeploy.png b/devops/ibm-toolchain/images/Tekton_Redeploy.png new file mode 100644 index 0000000..c331489 Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Redeploy.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Repo_Definition.png b/devops/ibm-toolchain/images/Tekton_Repo_Definition.png new file mode 100644 index 0000000..47cbf59 Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Repo_Definition.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Select.png 
b/devops/ibm-toolchain/images/Tekton_Select.png new file mode 100644 index 0000000..da36228 Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Select.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Success.png b/devops/ibm-toolchain/images/Tekton_Success.png new file mode 100644 index 0000000..c5c9d5f Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Success.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Trigger.png b/devops/ibm-toolchain/images/Tekton_Trigger.png new file mode 100644 index 0000000..0cf1f7f Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Trigger.png differ diff --git a/devops/ibm-toolchain/images/Tekton_Worker.png b/devops/ibm-toolchain/images/Tekton_Worker.png new file mode 100644 index 0000000..a88b590 Binary files /dev/null and b/devops/ibm-toolchain/images/Tekton_Worker.png differ diff --git a/devops/ibm-toolchain/index.html b/devops/ibm-toolchain/index.html new file mode 100644 index 0000000..910a78c --- /dev/null +++ b/devops/ibm-toolchain/index.html @@ -0,0 +1,1462 @@ + + + + + + + + + + + + + + + + + + + + + + + IBM ToolChain - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

IBM ToolChain

+

By following this tutorial, you create an open toolchain that includes a Tekton-based delivery pipeline. You then use the toolchain and DevOps practices to develop a simple "Hello World" web application (app) that you deploy to the IBM Cloud Kubernetes Service.

+

Tekton is an open source, vendor-neutral, Kubernetes-native framework that you can use to build, test, and deploy apps to Kubernetes. Tekton provides a set of shared components for building continuous integration and continuous delivery (CICD) systems. As an open source project, Tekton is managed by the Continuous Delivery Foundation (CDF). The goal is to modernize continuous delivery by providing industry specifications for pipelines, workflows, and other building blocks. With Tekton, you can build, test, and deploy across cloud providers or on-premises systems by abstracting the underlying implementation details. Tekton pipelines are built in to IBM Cloud™ Continuous Delivery.

+

After you create the cluster and the toolchain, you change your app's code and push the change to the Git Repos and Issue Tracking repository (repo). When you push changes to your repo, the delivery pipeline automatically builds and deploys the code.

+

Prerequisites

+
    +
  1. You must have an IBM Cloud account. If you don't have one, sign up for a trial. The account requires an IBMid. If you don't have an IBMid, you can create one when you register.
  2. +
  3. +

    Verify the toolchains and tool integrations that are available in your region and IBM Cloud environment. A toolchain is a set of tool integrations that support development, deployment, and operations tasks.

    +
  4. +
  5. +

    You need a Kubernetes cluster and an API key. You can create them by using either the UI or the CLI. You can create from the IBM Cloud Catalog

    +
  6. +
  7. +

    Create a container registry namespace to deploy the container we are going to build. You can create one from the Container Registry UI

    +
  8. +
  9. +

    Create the API key by using the string that is provided for your key name. +

    ibmcloud iam api-key-create my-api-key
    +
    + Save the API key value that is provided by the command.

    +
  10. +
+

Create Continuous Delivery Service Instance

+
    +
  1. Open the IBM Cloud Catalog
  2. +
  3. Search for delivery
  4. +
  5. Click on Continuous Delivery + Continuous Delivery
  6. +
  7. Select Dallas Region, as the Tutorial will be using Managed Tekton Worker available in Dallas only.
  8. +
  9. Select a Plan
  10. +
  11. Click Create
  12. +
+

Create an IBM Cloud Toolchain

+

In this task, you create a toolchain and add the tools that you need for this tutorial. Before you begin, you need your API key and Kubernetes cluster name.

+
    +
  1. Open the menu in the upper-left corner and click DevOps. Click ToolChains. Click Create a toolchain. Type in the search box toolchain. Click Build Your Own Toolchain. + Build your own toolchain
  2. +
  3. On the "Build your own toolchain" page, review the default information for the toolchain settings. The toolchain's name identifies it in IBM Cloud. Each toolchain is associated with a specific region and resource group. From the menus on the page, select the region Dallas since we are going to use the Beta Managed Tekton Worker, if you use Private Workers you can use any Region. + Select_Region
  4. +
  5. Click Create. The blank toolchain is created.
  6. +
  7. Click Add a Tool and click Git Repos and Issue Tracking. + Git Repos tile
      +
    • From the Repository type list, select Clone.
    • +
    • In the Source repository URL field, type https://github.com/csantanapr/hello-tekton.git.
    • +
    • Make sure to uncheck the Make this repository private checkbox and that the Track deployment of code changes checkbox is selected. +Git window
    • +
    • Click Create Integration. Tiles for Git Issues and Git Code are added to your toolchain.
    • +
    +
  8. +
  9. Return to your toolchain's overview page.
  10. +
  11. Click Add a Tool. Type pipeline in the search box and click Delivery Pipeline. + Add Tool Delivery Pipeline
      +
    • Type a name for your new pipeline.
    • +
    • Click Tekton. +Pipeline type
    • +
    • Make sure that the Show apps in the View app menu checkbox is selected. All the apps that your pipeline creates are shown in the View App list on the toolchain's Overview page.
    • +
    • Click Create Integration to add the Delivery Pipeline to your toolchain.
    • +
    +
  12. +
  13. Click Delivery Pipeline to open the Tekton Delivery Pipeline dashboard. Click the Definitions tab and complete these tasks:
  14. +
  15. Click Add to add your repository.
  16. +
  17. Specify the Git repo and URL that contains the Tekton pipeline definition and related artifacts. From the list, select the Git repo that you created earlier.
  18. +
  19. Select the branch in your Git repo that you want to use. For this tutorial, use the default value.
  20. +
  21. Specify the directory path to your pipeline definition within the Git repo. You can reference a specific definition within the same repo. For this tutorial, use the default value. + Pipeline window
  22. +
  23. Click Add, then click Save
  24. +
  25. Click the Worker tab and select the private worker that you want to use to run your Tekton pipeline on the associated cluster. Either select the private worker you set up in the previous steps, or select the IBM Managed workers in DALLAS option. + Worker tab
  26. +
  27. Click Save
  28. +
  29. Click the Triggers tab, click Add trigger, and click Git Repository. Associate the trigger with an event listener:
  30. +
  31. From the Repository list, select your repo.
  32. +
  33. Select the When a commit is pushed checkbox, and in the EventListener field, make sure that listener is selected. +Git Repository trigger
  34. +
  35. Click Save
  36. +
  37. On the Triggers tab, click Add trigger and click Manual. Associate that trigger with an event listener:
  38. +
  39. In the EventListener field, make sure that listener is selected.
  40. +
  41. Click Save. + Manual trigger + Note: Manual triggers run when you click Run pipeline and select the trigger. Git repository triggers run when the specified Git event type occurs for the specified Git repo and branch. The list of available event listeners is populated with the listeners that are defined in the pipeline code repo.
  42. +
  43. Click the Environment properties tab and define the environment properties for this tutorial. To add each property, click Add property and click Text property. Add these properties:
  44. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequired?Description
apikeyrequiredType the API key that you created earlier in this tutorial.
clusterOptional (cluster)Type the name of the Kubernetes cluster that you created.
registryNamespacerequiredType the IBM Image Registry namespace where the app image will be built and stored. To use an existing namespace, use the CLI and run ibmcloud cr namespace-list to identify all your current namespaces
repositoryrequiredType the source Git repository where your resources are stored. This value is the URL of the Git repository that you created earlier in this tutorial. To find your repo URL, return to your toolchain and click the Git tile. When the repository is shown, copy the URL.
revisionOptional (master)The Git branch
clusterRegionOptional (us-south)Type the region where your cluster is located.
clusterNamespaceOptional (prod)The namespace in your cluster where the app will be deployed.
registryRegionOptional (us-south)The region where your Image registry is located. To find your registry region, use the CLI and run ibmcloud cr region.
+

Environment properties +12. Click Save

+

Explore the pipeline

+

With a Tekton-based delivery pipeline, you can automate the continuous building, testing, and deployment of your apps.

+

The Tekton Delivery Pipeline dashboard displays an empty table until at least one Tekton pipeline runs. After a Tekton pipeline runs, either manually or as the result of external Git events, the table lists the run, its status, and the last updated time of the run definition.

+

To run the manual trigger that you set up in the previous task, click Run pipeline and select the name of the manual trigger that you created. The pipeline starts to run and you can see the progress on the dashboard. Pipeline runs can be in any of the following states:

+
    +
  • Pending: The PipelineRun definition is queued and waiting to run.
  • +
  • Running: The PipelineRun definition is running in the cluster.
  • +
  • Succeeded: The PipelineRun definition was successfully completed in the cluster.
  • +
  • +

    Failed: The PipelineRun definition run failed. Review the log file for the run to determine the cause. + Pipeline dashboard

    +
  • +
  • +

    For more information about a selected run, click any row in the table. You view the Task definition and the steps in each PipelineRun definition. You can also view the status, logs, and details of each Task definition and step, and the overall status of the PipelineRun definition. + Pipeline Log

    +
  • +
  • +

    The pipeline definition is stored in the pipeline.yaml file in the .tekton folder of your Git repository. Each task has a separate section of this file. The steps for each task are defined in the tasks.yaml file.

    +
  • +
  • +

    Review the pipeline-build-task. The task consists of a git clone of the repository followed by two steps:

    +
      +
    • pre-build-check: This step checks for the mandatory Dockerfile and runs a lint tool. It then checks the registry current plan and quota before it creates the image registry namespace if needed.
    • +
    • build-docker-image: This step creates the Docker image by using the IBM Cloud Container Registry build service through the ibmcloud cr build CLI script.
    • +
    +
  • +
  • Review the pipeline-validate-task. The task consists of a git clone of the repository, followed by the check-vulnerabilities step. This step runs the IBM Cloud Vulnerability Advisor on the image to check for known vulnerabilities. If it finds a vulnerability, the job fails, preventing the image from being deployed. This safety feature prevents apps with security holes from being deployed. The image has no vulnerabilities, so it passes. In this tutorial template, the default configuration of the job is to not block on failure.
  • +
  • Review the pipeline-deploy-task. The task consists of a git clone of the repository followed by two steps:
      +
    • pre-deploy-check: This step checks whether the IBM Container Service cluster is ready and has a namespace that is configured with access to the private image registry by using an IBM Cloud API Key.
    • +
    • deploy-to-kubernetes: This step updates the deployment.yml manifest file with the image url and deploys the application using kubectl apply
    • +
    +
  • +
  • After all the steps in the pipeline are completed, a green status is shown for each task. Click the deploy-to-kubernetes step and click the Logs tab to see the successful completion of this step. + Pipeline success
  • +
  • Scroll to the end of the log. The DEPLOYMENT SUCCEEDED message is shown at the end of the log. + Deployment succeeded
  • +
  • Click the URL to see the running application. + Running app
  • +
+

Modify the App Code

+

In this task, you modify the application and redeploy it. You can see how your Tekton-based delivery pipeline automatically picks up the changes in the application on commit and redeploys the app.

+
    +
  1. On the toolchain's Overview page, click the Git tile for your application.
      +
    • Tip: You can also use the built-in Eclipse Orion-based Web IDE, a local IDE, or your favorite editor to change the files in your repo.
    • +
    +
  2. +
  3. In the repository directory tree, open the app.js file. + File browser
  4. +
  5. Edit the text message code to change the welcome message. + Edit file
  6. +
  7. Commit the updated file by typing a commit message and clicking Commit changes to push the change to the project's remote repository.
  8. +
  9. Return to the toolchain's Overview page by clicking the back arrow.
  10. +
  11. Click Delivery Pipeline. The pipeline is running because the commit automatically started a build. Over the next few minutes, watch your change as it is built, tested, and deployed. + Dashboard redeployment
  12. +
  13. After the deploy-to-kubernetes step is completed, refresh your application URL. The updated message is shown.
  14. +
+

Clean up Resources

+

In this task, you can remove any of the content that is generated by this tutorial. Before you begin, you need the IBM Cloud CLI and the IBM Cloud Kubernetes Service CLI. Instructions to install the CLI are in the prerequisite section of this tutorial.

+
    +
  1. Delete the git repository, sign in into git, select personal projects. Then go to repository General settings and remove the repository.
  2. +
  3. Delete the toolchain. You can delete a toolchain and specify which of the associated tool integrations you want to delete. When you delete a toolchain, the deletion is permanent.
      +
    • On the DevOps dashboard, on the Toolchains page, click the toolchain to delete. Alternatively, on the app's Overview page, on the Continuous delivery card, click View Toolchain.
    • +
    • Click the More Actions menu, which is next to View app.
    • +
    • Click Delete. Deleting a toolchain removes all of its tool integrations, which might delete resources that are managed by those integrations.
    • +
    • Confirm the deletion by typing the name of the toolchain and clicking Delete.
    • +
    • Tip: When you delete a GitHub, GitHub Enterprise, or Git Repos and Issue Tracking tool integration, the associated repo isn't deleted from GitHub, GitHub Enterprise, or Git Repos and Issue Tracking. You must manually remove the repo.
    • +
    +
  4. +
  5. Delete the cluster or discard the namespace from it. It is easiest to delete the entire namespace (Please do not delete the default namespace) by using the IBM Cloud™ Kubernetes Service CLI from a command-line window. However, if you have other resources that you need to keep in the namespace, you need to delete the application resources individually instead of the entire namespace. To delete the entire namespace, enter this command: +
    kubectl delete namespace [not-the-default-namespace]
    +
  6. +
  7. Delete your IBM Cloud API key.
  8. +
  9. From the Manage menu, click Access (IAM). Click IBM Cloud API Keys.
  10. +
  11. Find your API Key in the list and select Delete from the menu to the right of the API Key name.
  12. +
  13. Delete the container images. To delete the images in your container image registry, enter this command in a command-line window: +
    ibmcloud cr image-rm IMAGE [IMAGE...]
    +
    + If you created a registry namespace for the tutorial, delete the entire registry namespace by entering this command: +
    ibmcloud cr namespace-rm NAMESPACE
    +
      +
    • Note: You can run this tutorial many times by using the same registry namespace and cluster parameters without discarding previously generated resources. The generated resources use randomized names to avoid conflicts.
    • +
    +
  14. +
+

Summary

+

You created a toolchain with a Tekton-based delivery pipeline that deploys a "Hello World" app to a secure container in a Kubernetes cluster. You changed a message in the app and tested your change. When you pushed the change to the repo, the delivery pipeline automatically redeployed the app.

+ + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/devops/images/Business_devops.png b/devops/images/Business_devops.png new file mode 100644 index 0000000..cdbcf1a Binary files /dev/null and b/devops/images/Business_devops.png differ diff --git a/devops/images/DevOps.jpg b/devops/images/DevOps.jpg new file mode 100644 index 0000000..b123547 Binary files /dev/null and b/devops/images/DevOps.jpg differ diff --git a/devops/images/Tekton_Success.png b/devops/images/Tekton_Success.png new file mode 100644 index 0000000..c5c9d5f Binary files /dev/null and b/devops/images/Tekton_Success.png differ diff --git a/devops/images/cd_process.jpg b/devops/images/cd_process.jpg new file mode 100644 index 0000000..aaabbb2 Binary files /dev/null and b/devops/images/cd_process.jpg differ diff --git a/devops/images/cdply_process.jpg b/devops/images/cdply_process.jpg new file mode 100644 index 0000000..92e0eea Binary files /dev/null and b/devops/images/cdply_process.jpg differ diff --git a/devops/images/ci.jpg b/devops/images/ci.jpg new file mode 100644 index 0000000..70e0dfd Binary files /dev/null and b/devops/images/ci.jpg differ diff --git a/devops/images/ci_process.jpg b/devops/images/ci_process.jpg new file mode 100644 index 0000000..17e593d Binary files /dev/null and b/devops/images/ci_process.jpg differ diff --git a/devops/images/cicd.jpg b/devops/images/cicd.jpg new file mode 100644 index 0000000..1f57737 Binary files /dev/null and b/devops/images/cicd.jpg differ diff --git a/devops/images/devops_architecture.png b/devops/images/devops_architecture.png new file mode 100644 index 0000000..f4fcc8d Binary files /dev/null and b/devops/images/devops_architecture.png differ diff --git a/devops/images/devops_architecture_cn.png b/devops/images/devops_architecture_cn.png new file mode 100644 index 0000000..895fc51 Binary files /dev/null and b/devops/images/devops_architecture_cn.png differ diff --git a/devops/images/devops_twelvefactor.png 
b/devops/images/devops_twelvefactor.png new file mode 100644 index 0000000..4c99cb6 Binary files /dev/null and b/devops/images/devops_twelvefactor.png differ diff --git a/devops/index.html b/devops/index.html new file mode 100644 index 0000000..07fe8c4 --- /dev/null +++ b/devops/index.html @@ -0,0 +1,1284 @@ + + + + + + + + + + + + + + + + + + + + + + + DevOps Introduction - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

DevOps Introduction

+

DevOps has recently become a popular buzzword in the Cloud World. Its meaning varies from business to business, and it means different things to different people. In traditional IT, organizations have separate teams for Development and Operations. The development team is responsible for coding and the operations team is responsible for releasing it to production. Between these two different teams, there will always be some sort of differences. These may be due to the usage of different system environments, software libraries, etc. In order to bridge this gap, DevOps came into play.

+

What is DevOps ?

+

“DevOps is a philosophy, a cultural shift that merges operations with development and demands a linked toolchain of technologies to facilitate collaborative change. DevOps toolchains … can include dozens of non-collaborative tools, making the task of automation a technically complex and arduous one.” - Gartner

+

devops

+

These days every business has critical applications which can never go down. Some of the examples are as follows.

+

devops

+

In order to make sure that these applications are up and running smoothly, we need DevOps.

+

Adopting DevOps allows enterprises to create, maintain, and improve their applications at a faster pace than traditional methods. Today, most global organizations have adopted DevOps.

+

Presentations

+

Tekton Overview

+

GitOps Overview

+

Benefits of DevOps

+
    +
  • Continuous software delivery
  • +
  • High quality software
  • +
  • Increased speed and faster problem resolution
  • +
  • Increased reliability
  • +
  • Easier to manage the software
  • +
  • Collaboration and enhanced team communication
  • +
  • Customer satisfaction etc.
  • +
+

Understanding DevOps

+

As we mentioned before, development teams and operations teams are often in conflict with each other. Developers keep changing the software to include new features, whereas operations engineers want to keep the system stable.

+
    +
  • Their goals are different.
  • +
  • They use different processes.
  • +
  • They use different tools.
  • +
+

All these may be different reasons for the gap between these two teams.

+

To bridge this gap between the two teams, we need DevOps. It closes the gap by aligning incentives and sharing approaches for tools and processes. It helps us to streamline the software delivery process. From the time we begin the project until its delivery, it helps us to improve the cycle time by emphasizing learning through feedback gathered from production to development.

+

It includes several aspects like the below.

+
    +
  • Automation - It is quite essential for DevOps. It helps us to gather quick feedback.
  • +
  • Culture - Processes and tools are important. But, people are always more important.
  • +
  • Measurement - Shared incentives are important. Quality is critical.
  • +
  • Sharing - Need a Culture where people can share ideas, processes and tools.
  • +
+

+

+

Where to start ?

+

Understanding the eco system of your software is important. Identify all the environments like dev, test, prod etc. you have in your system and how the delivery happens from end to end.

+
    +
  • Define continuous delivery
  • +
  • Establish proper collaboration between teams
  • +
  • Make sure the teams are on same pace
  • +
  • Identify the pain points in your system and start working on them.
  • +
+

DevOps Best Practices

+

These are some of the standard practices adopted in DevOps.

+
    +
  • Source Code Management
  • +
  • Code Review
  • +
  • Configuration Management
  • +
  • Build Management
  • +
  • Artifact Repository Management
  • +
  • Release Management
  • +
  • Test Automation
  • +
  • Continuous Integration
  • +
  • Continuous Delivery
  • +
  • Continuous Deployment
  • +
  • Infrastructure As Code
  • +
  • Automation
  • +
  • Key Application Performance Monitoring/Indicators
  • +
+

Source Code Management

+

Source Code Management (SCM) systems help to maintain the code base. They allow multiple developers to work on the code concurrently, prevent them from overwriting each other's code, and help them to work in parallel from different locations.

+

Collaboration is an important concept in DevOps, and SCM helps us to achieve it by coordinating services across the development team. It also tracks co-authoring, collaboration, and individual contributions. It helps the developers to audit the code changes and allows rollbacks if required. It also enables backup and allows recovery when required.

+

Code Review

+

Code reviews allow developers to improve the quality of the code. They help us to identify problems in advance. By reviewing the code, we can fix some of the problems such as memory leaks, buffer overflows, formatting errors, etc.

+

This process improves collaboration across the team. Also, code defects are identified and removed before merging them into the mainstream, thereby improving the quality of the code.

+

Configuration Management

+

Configuration Management is managing the configurations by identifying, verifying, and maintaining them. This is done for both software and hardware. The configuration management tools make sure that configurations are properly configured across different systems as per the requirements.

+

This helps to analyze the impact on the systems due to configurations. It makes sure the provisioning is done correctly on different systems like dev, QA, prod etc. It simplifies the coordination between development and operations teams.

+

Build Management

+

Build Management helps to assemble the build environment by packaging all the required components of the software application, such as the source code, dependencies, etc., together into a workable unit. Builds can be done manually, on demand, or automatically.

+

It ensures that the software is stable and it is reusable. It improves the quality of the software and makes sure it is reliable. It also increases the efficiency.

+

Artifact Repository Management

+

An Artifact Repository Management system is used to manage the builds. It is a dedicated server which is used to store all the binaries that were outputs of the successful builds.

+

It manages the life cycles of different artifacts. It helps you to easily share the builds across the team. It controls access to the build artifacts by access control.

+

Release Management

+

Release management is the part of the software development lifecycle which manages a release from development through deployment to support. Requests keep coming in for the addition of new features. Also, sometimes there may be a need to change existing functionality. This is when the release management cycle begins. Once the new feature or change is approved, it is designed, built, tested, reviewed, and, after acceptance, deployed to production. After this, it goes to maintenance, and even at this point there may be a need for enhancement. If that is the case, it will be a new cycle again.

+

It helps us to track all the phases and status of deployments in different environments.

+

Test Automation

+

Manual testing takes lots of time. We can automate some of the manual tests which are repetitive, time consuming, and have defined input by test automation.

+

Automatic tests helps to improve the code quality, reduces the amount of time spent on testing, and improves the effectiveness of the overall testing life cycle.

+

Continuous Integration

+

Continuous integration allows the developers to continuously integrate the code they developed. Whenever a latest code change is made and committed to the source control system, the source code is rebuilt and this is then forwarded to testing.

+

With this, the latest code is always available, the builds are faster and the tests are quick.

+

Continuous Delivery

+

Continuous Delivery is the next step to Continuous Integration. In the integration, the code is built and tested. Now in the delivery, this is taken to staging environment. This is done in small frequencies and it makes sure the functionality of the software is stable.

+

It reduces the manual overhead. The code is continuously delivered and constantly reviewed.

+

Continuous Deployment

+

Continuous Deployment comes after Continuous Delivery. In the deployment stage, the code is deployed to the production environment. The entire process is automated in this stage.

+

This allows faster software releases. Improves the collaboration across the teams. Enhances the code quality.

+

Infrastructure As Code

+

Infrastructure as Code is defining infrastructure services as software code. They are defined as configuration files. Traditionally, in on-premise applications, these are managed by system administrators, but in the cloud, the infrastructure is maintained like any other software code.

+

Helps us to change the system configuration quickly. Tracking is easy and end to end testing is possible. Infrastructure availability is high.

+

Automation

+

Automation is a key part of DevOps. Without automation, DevOps is not efficient.

+

Automation comes into play whenever there is a repetitive task. Developers can automate infrastructure, applications, load balancers, etc.

+

Key Application Performance Monitoring/Indicators

+

DevOps is all about measuring the metrics and feedback, with continuous improvement processes. Collecting metrics and monitoring the software plays an important role. Different measures like uptime versus downtime, resolutions time lines etc. helps us to understand the performance of the system.

+

Devops in Twelve factor apps

+

devops

+

If you are new to Twelve factor methodology, have a look here. For more details, checkout Cloud-Native module.

+

DevOps Reference Architecture

+

devops

+
    +
  1. Collaboration tools enable a culture of innovation. Developers, designers, operations teams, and managers must communicate constantly. Development and operations tools must be integrated to post updates and alerts as new builds are completed and deployed and as performance is monitored. The team can discuss the alerts as a group in the context of the tool.
  2. +
  3. As the team brainstorms ideas, responds to feedback and metrics, and fixes defects, team members create work items and rank them in the backlog. The team work on items from the top of the backlog, delivering to production as they complete work.
  4. +
  5. Developers write source code in a code editor to implement the architecture. They construct, change, and correct applications by using various coding models and tools.
  6. +
  7. Developers manage the versions and configuration of assets, merge changes, and manage the integration of changes. The source control tool that a team uses should support social coding.
  8. +
  9. Developers compile, package, and prepare software assets. They need tools that can assess the quality of the code that is being delivered to source control. Those assessments are done before delivery, are associated with automated build systems, and include practices such as code reviews, unit tests, code quality scans, and security scans.
  10. +
  11. Binary files and other output from the build are sent to and managed in a build artifact repository.
  12. +
  13. The release is scheduled. The team needs tools that support release communication and managing, preparing, and deploying releases.
  14. +
  15. The team coordinates the manual and automated processes that are required for the solution to operate effectively. The team must strive towards continuous delivery with zero downtime. A/B deployments can help to gauge the effectiveness of new changes.
  16. +
  17. The team must understand the application and the options for the application's runtime environment, security, management, and release requirements.
  18. +
  19. Depending on the application requirements, some or all of the application stack must be considered, including middleware, the operating system, and virtual machines.
  20. +
  21. The team must ensure that all aspects of the application and its supporting infrastructure are secured.
  22. +
  23. The team plans, configures, monitors, defines criteria, and reports on application availability and performance. Predictive analytics can indicate problems before they occur.
  24. +
  25. The right people on the team or systems are notified when issues occur.
  26. +
  27. The team manages the process for responding to operations incidents, and delivers the changes to fix any incidents.
  28. +
  29. The team uses analytics to learn how users interact with the application and measure success through metrics.
  30. +
  31. When users interact with the application, they can provide feedback on their requirements and how the application is meeting them, which is captured by analytics as well.
  32. +
  33. DevOps engineers manage the entire application lifecycle while they respond to feedback and analytics from the running application.
  34. +
  35. The enterprise network is protected by a firewall and must be accessed through transformation and connectivity services and secure messaging services.
  36. +
  37. The security team uses the user directory throughout the flow. The directory contains information about the user accounts for the enterprise.
  38. +
+

For a cloud native implementation, the reference architecture will be as follows.

+

devops

+

References

+
    +
  • [Michael Hüttermann (2012). DevOps for Developers. Publisher: Apress] (https://learning.oreilly.com/library/view/devops-for-developers/9781430245698/)
  • +
  • [Sricharan Vadapalli (2018). DevOps: Continuous Delivery, Integration, and Deployment with DevOps. Publisher: Packt Publishing] (https://learning.oreilly.com/library/view/devops-continuous-delivery/9781789132991/)
  • +
  • [DevOps Architecture] (https://www.ibm.com/cloud/garage/architectures/devOpsArchitecture/0_1)
  • +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/devops/materials/04-Tekton-Overview.pdf b/devops/materials/04-Tekton-Overview.pdf new file mode 100644 index 0000000..d67d1b8 Binary files /dev/null and b/devops/materials/04-Tekton-Overview.pdf differ diff --git a/devops/materials/04-Tekton-Overview.pptx.zip b/devops/materials/04-Tekton-Overview.pptx.zip new file mode 100644 index 0000000..90cf64f Binary files /dev/null and b/devops/materials/04-Tekton-Overview.pptx.zip differ diff --git a/devops/materials/05-Understanding-GitOps.pdf b/devops/materials/05-Understanding-GitOps.pdf new file mode 100644 index 0000000..e2abb53 Binary files /dev/null and b/devops/materials/05-Understanding-GitOps.pdf differ diff --git a/devops/materials/05-Understanding-GitOps.pptx.zip b/devops/materials/05-Understanding-GitOps.pptx.zip new file mode 100644 index 0000000..93eb8fd Binary files /dev/null and b/devops/materials/05-Understanding-GitOps.pptx.zip differ diff --git a/devops/materials/10-IBM-Cloud-DevOps.pdf b/devops/materials/10-IBM-Cloud-DevOps.pdf new file mode 100644 index 0000000..33cf71e Binary files /dev/null and b/devops/materials/10-IBM-Cloud-DevOps.pdf differ diff --git a/devops/materials/10-IBM-Cloud-DevOps.pptx.zip b/devops/materials/10-IBM-Cloud-DevOps.pptx.zip new file mode 100644 index 0000000..85a152e Binary files /dev/null and b/devops/materials/10-IBM-Cloud-DevOps.pptx.zip differ diff --git a/devops/tekton/index.html b/devops/tekton/index.html new file mode 100644 index 0000000..f73d113 --- /dev/null +++ b/devops/tekton/index.html @@ -0,0 +1,1153 @@ + + + + + + + + + + + + + + + + + + + + + + + Continuous Integration - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Continuous Integration

+

Continuous Integration, Delivery, and Deployment are important DevOps practices that we often hear a lot about. These processes are valuable and ensure that the software is kept up to date in a timely manner.

+
    +
• Continuous Integration is an automation process which allows developers to integrate their work into a repository. When a developer pushes their work to the source code repository, it ensures that the software continues to work properly. It helps to enable collaborative development across teams and also helps to identify integration bugs sooner.
  • +
  • Continuous Delivery comes after Continuous Integration. It prepares the code for release. It automates the steps that are needed to deploy a build.
  • +
  • Continuous Deployment is the final step which succeeds Continuous Delivery. It automatically deploys the code whenever a code change is done. Entire process of deployment is automated.
  • +
+

Tekton Overview

+

Tekton is a cloud-native solution for building CI/CD systems. It consists of Tekton Pipelines, which provides the building blocks, and of supporting components, such as Tekton CLI and Tekton Catalog, that make Tekton a complete ecosystem.

+

Presentations

+

Tekton Overview +IBM Cloud DevOps with Tekton

+

Activities

+

The continuous integration activities focus around Tekton the integration platform. These labs will show you how to build pipelines and test your code before deployment.

+

These tasks assume that you have:

+
    +
  • Reviewed the continuous integration concept page.
  • +
  • Installed Tekton into your cluster.
  • +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLinkTime
Walkthroughs
Deploying Applications From SourceUsing OpenShift 4S2I30 min
Try It Yourself
Tekton LabUsing Tekton to build container imagesTekton1 hour
IBM Cloud DevOpsUsing IBM Cloud ToolChain with TektonTekton on IBM Cloud1 hour
Jenkins LabUsing Jenkins to build and deploy applications.Jenkins1 hour
+

Once you have completed these tasks, you will have an understanding of continuous integration and how to use Tekton to build a pipeline.

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/devops/tekton/samples/misc/kubectl-secret.txt b/devops/tekton/samples/misc/kubectl-secret.txt new file mode 100644 index 0000000..743908a --- /dev/null +++ b/devops/tekton/samples/misc/kubectl-secret.txt @@ -0,0 +1,5 @@ +kubectl create secret docker-registry regcred \ + --docker-server=https://index.docker.io/v1/ \ + --docker-username='' \ + --docker-password='' \ + --docker-email='' diff --git a/devops/tekton/samples/misc/sa.yaml b/devops/tekton/samples/misc/sa.yaml new file mode 100644 index 0000000..7ca49b6 --- /dev/null +++ b/devops/tekton/samples/misc/sa.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipeline +secrets: + - name: regcred diff --git a/devops/tekton/samples/pipelines/pipeline.yaml b/devops/tekton/samples/pipelines/pipeline.yaml new file mode 100644 index 0000000..0ebad0c --- /dev/null +++ b/devops/tekton/samples/pipelines/pipeline.yaml @@ -0,0 +1,36 @@ +apiVersion: tekton.dev/v1beta1 +kind: Pipeline +metadata: + name: test-build +spec: + params: + - name: repo-url + default: https://github.com/ibm-cloud-architecture/cloudnative_sample_app + - name: revision + default: master + - name: image-server + default: docker.io + - name: image-namespace + default: csantanapr + - name: image-repository + default: cloudnative_sample_app + tasks: + - name: test + taskRef: + name: java-test + params: + - name: url + value: $(params.repo-url) + - name: revision + value: $(params.revision) + - name: build + runAfter: [test] + taskRef: + name: buildah + params: + - name: image + value: $(params.image-server)/$(params.image-namespace)/$(params.image-repository) + - name: url + value: $(params.repo-url) + - name: revision + value: $(params.revision) diff --git a/devops/tekton/samples/pipelines/pipelinerun.yaml b/devops/tekton/samples/pipelines/pipelinerun.yaml new file mode 100644 index 0000000..7aad445 --- /dev/null +++ b/devops/tekton/samples/pipelines/pipelinerun.yaml @@ 
-0,0 +1,13 @@ +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + generateName: test-build-run- +spec: + serviceAccountName: pipeline + pipelineRef: + name: test-build + params: + - name: image-server + value: us.icr.io + - name: image-namespace + value: student01-registry diff --git a/devops/tekton/samples/tasks/task-buildah.yaml b/devops/tekton/samples/tasks/task-buildah.yaml new file mode 100644 index 0000000..8fac91c --- /dev/null +++ b/devops/tekton/samples/tasks/task-buildah.yaml @@ -0,0 +1,56 @@ +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: buildah +spec: + params: + - name: BUILDER_IMAGE + description: The location of the buildah builder image. + default: quay.io/buildah/stable:v1.14.8 + - name: STORAGE_DRIVER + description: Set buildah storage driver + default: overlay + - name: DOCKERFILE + description: Path to the Dockerfile to build. + default: ./Dockerfile + - name: CONTEXT + description: Path to the directory to use as context. + default: . + - name: TLSVERIFY + description: Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry) + default: "false" + - name: FORMAT + description: The format of the built container, oci or docker + default: "oci" + - name: image + - name: url + - name: revision + default: master + steps: + - name: git-clone + image: alpine/git + script: | + git clone -b $(params.revision) --depth 1 $(params.url) /source + volumeMounts: + - name: source + mountPath: /source + - name: build + image: $(params.BUILDER_IMAGE) + workingdir: /source + script: | + echo "Building Image $(params.image)" + buildah --storage-driver=$(params.STORAGE_DRIVER) bud --format=$(params.FORMAT) --tls-verify=$(params.TLSVERIFY) -f $(params.DOCKERFILE) -t $(params.image) $(params.CONTEXT) + echo "Pushing Image $(params.image)" + buildah --storage-driver=$(params.STORAGE_DRIVER) push --tls-verify=$(params.TLSVERIFY) --digestfile ./image-digest $(params.image) docker://$(params.image) + securityContext: + 
privileged: true + volumeMounts: + - name: varlibcontainers + mountPath: /var/lib/containers + - name: source + mountPath: /source + volumes: + - name: varlibcontainers + emptyDir: {} + - name: source + emptyDir: {} diff --git a/devops/tekton/samples/tasks/task-kaniko.yaml b/devops/tekton/samples/tasks/task-kaniko.yaml new file mode 100644 index 0000000..4cc6d49 --- /dev/null +++ b/devops/tekton/samples/tasks/task-kaniko.yaml @@ -0,0 +1,42 @@ +apiVersion: tekton.dev/v1alpha1 +kind: Task +metadata: + name: kaniko +spec: + inputs: + params: + - name: DOCKERFILE + description: Path to the Dockerfile to build. + default: ./Dockerfile + - name: CONTEXT + description: The build context used by Kaniko. + default: ./ + - name: EXTRA_ARGS + default: "" + - name: BUILDER_IMAGE + description: The image on which builds will run + default: gcr.io/kaniko-project/executor:v0.16.0 + resources: + - name: source + type: git + + outputs: + resources: + - name: image + type: image + + steps: + - name: build-and-push + workingdir: /workspace/source + image: $(inputs.params.BUILDER_IMAGE) + # specifying DOCKER_CONFIG is required to allow kaniko to detect docker credential + # https://github.com/tektoncd/pipeline/pull/706 + env: + - name: DOCKER_CONFIG + value: /tekton/home/.docker + command: + - /kaniko/executor + - $(inputs.params.EXTRA_ARGS) + - --dockerfile=$(inputs.params.DOCKERFILE) + - --context=/workspace/source/$(inputs.params.CONTEXT) # The user does not need to care the workspace and the source. 
+ - --destination=$(outputs.resources.image.url) diff --git a/devops/tekton/samples/tasks/task-test.yaml b/devops/tekton/samples/tasks/task-test.yaml new file mode 100644 index 0000000..73196dd --- /dev/null +++ b/devops/tekton/samples/tasks/task-test.yaml @@ -0,0 +1,33 @@ +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: java-test +spec: + params: + - name: url + - name: revision + default: master + steps: + - name: git-clone + image: alpine/git + script: | + git clone -b $(params.revision) --depth 1 $(params.url) /source + volumeMounts: + - name: source + mountPath: /source + - name: test + image: maven:3.3-jdk-8 + workingdir: /source + script: | + mvn test + echo "tests passed with rc=$?" + volumeMounts: + - name: m2-repository + mountPath: /root/.m2 + - name: source + mountPath: /source + volumes: + - name: m2-repository + emptyDir: {} + - name: source + emptyDir: {} diff --git a/devops/tekton/samples/tasks/taskrun-buildah.yaml b/devops/tekton/samples/tasks/taskrun-buildah.yaml new file mode 100644 index 0000000..5765537 --- /dev/null +++ b/devops/tekton/samples/tasks/taskrun-buildah.yaml @@ -0,0 +1,13 @@ +apiVersion: tekton.dev/v1beta1 +kind: TaskRun +metadata: + generateName: buildah-task-run- +spec: + serviceAccountName: pipeline + taskRef: + name: buildah + params: + - name: url + value: https://github.com/ibm-cloud-architecture/cloudnative_sample_app + - name: image + value: docker.io/csantanapr/cloudnative_sample_app diff --git a/devops/tekton/samples/tasks/taskrun-test.yaml b/devops/tekton/samples/tasks/taskrun-test.yaml new file mode 100644 index 0000000..d2fd8e2 --- /dev/null +++ b/devops/tekton/samples/tasks/taskrun-test.yaml @@ -0,0 +1,10 @@ +apiVersion: tekton.dev/v1beta1 +kind: TaskRun +metadata: + generateName: test-task-run- +spec: + taskRef: + name: java-test + params: + - name: url + value: https://github.com/ibm-cloud-architecture/cloudnative_sample_app diff --git a/images/watson-logo.png b/images/watson-logo.png new file mode 
100644 index 0000000..23755d2 Binary files /dev/null and b/images/watson-logo.png differ diff --git a/images/watson-white-logo.png b/images/watson-white-logo.png new file mode 100644 index 0000000..135fbbe Binary files /dev/null and b/images/watson-white-logo.png differ diff --git a/index.html b/index.html new file mode 100644 index 0000000..b513e62 --- /dev/null +++ b/index.html @@ -0,0 +1,956 @@ + + + + + + + + + + + + + + + + + + + + + IBM Cloud Native Bootcamp - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

IBM Cloud Native Bootcamp

+

Concepts Covered

+
    +
  1. Cloud Native - open Cloud Native to read more.
  2. +
  3. Containers - open Containers to read more.
  4. +
  5. Kubernetes - open Kubernetes to read more.
  6. +
  7. DevOps - open Devops to read more. +
  8. +
+

Test your Knowledge

+

After taking the virtual bootcamp with an instructor from IBM Garage, or learning on your own self-paced, you can take the quizzes and even get a Badge to show off your Cloud Native street cred.

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/containers/container-registry/index.html b/labs/containers/container-registry/index.html new file mode 100644 index 0000000..bca8e05 --- /dev/null +++ b/labs/containers/container-registry/index.html @@ -0,0 +1,1886 @@ + + + + + + + + + + + + + + + + + + + + + + + Container Registries - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

IBM Container Registries

+

In this lab we are going to create a Container Image and store it in the IBM Cloud Container Registry

+

Prerequisites

+
    +
  • IBM Cloud Account
  • +
+

Login into IBM Cloud

+

Using the IBM Cloud Shell

+
    +
  1. Login into IBM Cloud
  2. +
  3. Select correct account from top right drop down if your IBM id is associated with multiple accounts
  4. +
  5. Click the IBM Cloud Shell Icon on the top right corner of the IBM Cloud Console + ibm cloud shell icon
  6. +
  7. This opens a new browser window with a Linux command-line terminal prompt. + ibm cloud shell prompt
  8. +
+

Create a new Container Registry namespace

+
    +
  1. Ensure that you're targeting the correct IBM Cloud Container Registry region. For example for Dallas region use us-south +
    ibmcloud cr region-set us-south
    +
  2. +
  3. Choose a name for your first namespace, and create that namespace. Use this namespace for the rest of the Quick Start. Create a new Container Registry namespace. This namespace is different from a Kubernetes/OpenShift namespace. The name needs to be all lowercase and globally unique within a region. +
    ibmcloud cr namespace-add <my_namespace>
    +
    + Now set the environment variable NAMESPACE to be used for the rest of the lab +
    export NAMESPACE=<my_namespace>
    +
  4. +
+

Building and Pushing a Container Image

+
    +
  1. Clone the following git repository and change directory to 1-containers +
    git clone --depth 1 https://github.com/csantanapr/think2020-nodejs.git my-app
    +cd my-app/1-containers/
    +
  2. +
  3. Inspect the file Dockerfile; it contains a multi-stage build: the first stage builds the application, and the second copies only the built files. +
    cat Dockerfile
    +
    +
    FROM registry.access.redhat.com/ubi8/nodejs-12 as base
    +
    +FROM base as builder
    +
    +WORKDIR /opt/app-root/src
    +
    +COPY package*.json ./
    +
    +RUN npm ci
    +
    +COPY public public 
    +COPY src src 
    +
    +RUN npm run build
    +
    +FROM base
    +
    +WORKDIR /opt/app-root/src
    +
    +COPY --from=builder  /opt/app-root/src/build build
    +
    +COPY package*.json ./
    +
    +RUN npm ci --only=production
    +
    +COPY --chown=1001:0 server server
    +RUN chmod -R g=u server
    +
    +ENV PORT=8080
    +
    +LABEL com.example.source="https://github.com/csantanapr/think2020-nodejs"
    +LABEL com.example.version="1.0"
    +
    +ARG ENV=production
    +ENV NODE_ENV $ENV
    +ENV NODE_VERSION $NODEJS_VERSION
    +CMD npm run $NODE_ENV
    +
  4. +
  5. Build and push the image, if not already set replace $NAMESPACE with the namespace you added previously, replace us.icr.io if using a different region. +
    ibmcloud cr build --tag us.icr.io/$NAMESPACE/my-app:1.0 ./
    +
  6. +
+

Explore the Container Registry on the IBM Cloud Console

+
    +
  1. Explore the container image details using the IBM Cloud Console. Go to the Main Menu->Kubernetes->Registry you can use the tabs Namespaces, Repository, Images + cr namespace + cr namespace + cr namespace + cr namespace
  2. +
+

Extra Credit (Run Image on Kubernetes)

+

If you have a Kubernetes Cluster you can run your application image

+
    +
  1. Get the Access token for your Kubernetes cluster, command assumes your cluster name is mycluster +
    ibmcloud ks cluster config -c mycluster
    +
  2. +
  3. Run the following commands to create a deployment using the image we just built. If not already set, replace $NAMESPACE with the IBM Container Registry namespace where we stored the image. +
    kubectl create deployment my-app --image us.icr.io/$NAMESPACE/my-app:1.0
    +kubectl rollout status deployment/my-app
    +kubectl port-forward deployment/my-app 8080:8080
    +
    + If the app is connected you should see the following output +
    Forwarding from 127.0.0.1:8080 -> 8080
    +Forwarding from [::1]:8080 -> 8080
    +
  4. +
  5. Open a new Session and run the following command +
    curl localhost:8080 -I
    +
    + You should see in the first line of output the following +
    HTTP/1.1 200 OK
    +
  6. +
  7. +

    To access the app using a browser use the IBM Cloud Shell Web Preview. Click the Web Preview Icon and select port 8080 from the drop down. The application will open in a new browser window. + ibm cloud shell web preview select + web app

    +
  8. +
  9. +

    To stop the application, quit the kubectl port-forward command on the terminal by pressing Ctrl+C in Session 1

    +
  10. +
+

Delete Deployment and Image

+
    +
  1. Delete the app deployment +
    kubectl delete deployment my-app
    +
  2. +
  3. Delete the container image, if not already set replace $NAMESPACE with the registry namespace +
    ibmcloud cr image-rm us.icr.io/$NAMESPACE/my-app:1.0
    +
  4. +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/containers/images/cr-images.png b/labs/containers/images/cr-images.png new file mode 100644 index 0000000..6978db6 Binary files /dev/null and b/labs/containers/images/cr-images.png differ diff --git a/labs/containers/images/cr-namespaces.png b/labs/containers/images/cr-namespaces.png new file mode 100644 index 0000000..1dcd8e6 Binary files /dev/null and b/labs/containers/images/cr-namespaces.png differ diff --git a/labs/containers/images/cr-repositories.png b/labs/containers/images/cr-repositories.png new file mode 100644 index 0000000..07f2a8c Binary files /dev/null and b/labs/containers/images/cr-repositories.png differ diff --git a/labs/containers/images/cr-settings.png b/labs/containers/images/cr-settings.png new file mode 100644 index 0000000..6507803 Binary files /dev/null and b/labs/containers/images/cr-settings.png differ diff --git a/labs/containers/images/ibmcloud-shell-button.png b/labs/containers/images/ibmcloud-shell-button.png new file mode 100644 index 0000000..d058a89 Binary files /dev/null and b/labs/containers/images/ibmcloud-shell-button.png differ diff --git a/labs/containers/images/ibmcloud-shell-preview.png b/labs/containers/images/ibmcloud-shell-preview.png new file mode 100644 index 0000000..9bd5d1f Binary files /dev/null and b/labs/containers/images/ibmcloud-shell-preview.png differ diff --git a/labs/containers/images/ibmcloud-shell-prompt.png b/labs/containers/images/ibmcloud-shell-prompt.png new file mode 100644 index 0000000..4f87cb8 Binary files /dev/null and b/labs/containers/images/ibmcloud-shell-prompt.png differ diff --git a/labs/containers/images/web-app.png b/labs/containers/images/web-app.png new file mode 100644 index 0000000..ad72fbb Binary files /dev/null and b/labs/containers/images/web-app.png differ diff --git a/labs/containers/index.html b/labs/containers/index.html new file mode 100644 index 0000000..f3023c0 --- /dev/null +++ b/labs/containers/index.html @@ 
-0,0 +1,2119 @@ + + + + + + + + + + + + + + + + + + + + + + + Docker - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Docker Lab

+

Introduction

+

In this lab, you will learn about how to use docker and how to run applications using docker. This lab will not explicitly give you the commands to progress through these exercises, but will show you a similar expected output.

+

It's your goal to create the commands needed (shown as < command > at each step) to complete the lab.

+

Prerequisites

+
    +
  • Create a Quay account. This account is needed to push images to a container registry. Follow the tutorial to get familiar with interacting with Quay
  • +
  • You need to install Docker in your environment. Follow the instructions here to install it on Mac and here to install it on Windows.
  • +
+

Working with docker

+

Before proceeding, make sure docker is properly installed on your system.

+
    +
  1. Please verify your Docker by looking up the version.
  2. +
+

If it is installed, you will see a version number something similar to below.

+
$ <command>
+Docker version 19.03.0-beta3, build c55e026
+
+

Running a hello-world container

+

Let us start with a hello-world container.

+
    +
  1. run a hello-world container.
  2. +
+

If it is successfully run, you will see something like below.

+
$ <command>
+Unable to find image 'hello-world:latest' locally
+latest: Pulling from library/hello-world
+1b930d010525: Pull complete
+Digest: sha256:41a65640635299bab090f783209c1e3a3f11934cf7756b09cb2f1e02147c6ed8
+Status: Downloaded newer image for hello-world:latest
+
+Hello from Docker!
+This message shows that your installation appears to be working correctly.
+
+To generate this message, Docker took the following steps:
+ 1. The Docker client contacted the Docker daemon.
+ 2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
+    (amd64)
+ 3. The Docker daemon created a new container from that image which runs the
+    executable that produces the output you are currently reading.
+ 4. The Docker daemon streamed that output to the Docker client, which sent it
+    to your terminal.
+
+To try something more ambitious, you can run an Ubuntu container with:
+ $ docker run -it ubuntu bash
+
+Share images, automate workflows, and more with a free Docker ID:
+ https://hub.docker.com/
+
+For more examples and ideas, visit:
+ https://docs.docker.com/get-started/
+
+

Since the hello-world image does not exist locally, it is pulled from library/hello-world. But if it already exists, docker will not pull it every time but rather use the existing one.

+

This image is pulled from https://hub.docker.com/_/hello-world. Docker hub is a repository used to store docker images. Similarly, you can use your own registries to store images. For example, IBM Cloud provides you a container registry.

+

Verifying the hello-world image

+
    +
  1. Now verify that the image exists locally on your system.
  2. +
+

You will then see something like below.

+
$ <command>
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+hello-world         latest              fce289e99eb9        5 months ago        1.84kB
+
+

Get the sample application

+

To get the sample application, you will need to clone it from github.

+
# Clone the sample app
+git clone https://github.com/ibm-cloud-architecture/cloudnative_sample_app.git
+
+# Go to the project's root folder
+cd cloudnative_sample_app/
+
+

Run the application on Docker

+

Build the docker image

+

Let's take a look at the Dockerfile before building it.

+
FROM maven:3.3-jdk-8 as builder
+
+COPY . .
+RUN mvn clean install
+
+FROM openliberty/open-liberty:springBoot2-ubi-min as staging
+
+COPY --chown=1001:0 --from=builder /target/cloudnativesampleapp-1.0-SNAPSHOT.jar /config/app.jar
+RUN springBootUtility thin \
+    --sourceAppPath=/config/app.jar \
+    --targetThinAppPath=/config/dropins/spring/thinClinic.jar \
+    --targetLibCachePath=/opt/ol/wlp/usr/shared/resources/lib.index.cache
+
+
    +
  • Using the FROM instruction, we provide the name and tag of an image that should be used as our base. This must always be the first instruction in the Dockerfile.
  • +
  • Using COPY instruction, we copy new contents from the source filesystem to the container filesystem.
  • +
  • RUN instruction executes the commands.
  • +
+

This Dockerfile leverages multi-stage builds, which lets you create multiple stages in your Dockerfile to do certain tasks.

+

In our case, we have two stages.

+
    +
  • The first one uses maven:3.3-jdk-8 as its base image to download and build the project and its dependencies using Maven.
  • +
  • The second stage uses openliberty/open-liberty:springBoot2-ubi-min as its base image to run the compiled code from the previous stage.
  • +
+

The advantage of using the multi-stage builds approach is that the resulting image only uses the base image of the last stage. Meaning that in our case, we will only end up with the openliberty/open-liberty:springBoot2-ubi-min as our base image, which is much tinier than having an image that has both Maven and the JRE.

+

By using the multi-stage builds approach when it makes sense to use it, you will end up with much lighter and easier to maintain images, which can save you space on your Docker Registry. Also, having tinier images usually means less resource consumption on your worker nodes, which can result in cost savings.

+

Once you have the Dockerfile ready, the next step is to build it. The build command allows you to build a docker image which you can later run as a container.

+
    +
  1. Build the Dockerfile with the image_name of greeting, give it an image_tag of v1.0.0, and build it using the current context.
  2. +
+

You will see something like below:

+
$ <command>
+Sending build context to Docker daemon  22.17MB
+Step 1/6 : FROM maven:3.3-jdk-8 as builder
+ ---> 9997d8483b2f
+Step 2/6 : COPY . .
+ ---> c198e3e54023
+Step 3/6 : RUN mvn clean install
+ ---> Running in 24378df7f87b
+[INFO] Scanning for projects...
+.
+.
+.
+[INFO] Installing /target/cloudnativesampleapp-1.0-SNAPSHOT.jar to /root/.m2/repository/projects/cloudnativesampleapp/1.0-SNAPSHOT/cloudnativesampleapp-1.0-SNAPSHOT.jar
+[INFO] Installing /pom.xml to /root/.m2/repository/projects/cloudnativesampleapp/1.0-SNAPSHOT/cloudnativesampleapp-1.0-SNAPSHOT.pom
+[INFO] ------------------------------------------------------------------------
+[INFO] BUILD SUCCESS
+[INFO] ------------------------------------------------------------------------
+[INFO] Total time: 44.619 s
+[INFO] Finished at: 2020-04-06T16:07:04+00:00
+[INFO] Final Memory: 38M/385M
+[INFO] ------------------------------------------------------------------------
+Removing intermediate container 24378df7f87b
+ ---> cc5620334e1b
+Step 4/6 : FROM openliberty/open-liberty:springBoot2-ubi-min as staging
+ ---> 021530b0b3cb
+Step 5/6 : COPY --chown=1001:0 --from=builder /target/cloudnativesampleapp-1.0-SNAPSHOT.jar /config/app.jar
+ ---> dbc81e5f4691
+Step 6/6 : RUN springBootUtility thin     --sourceAppPath=/config/app.jar     --targetThinAppPath=/config/dropins/spring/thinClinic.jar     --targetLibCachePath=/opt/ol/wlp/usr/shared/resources/lib.index.cache
+ ---> Running in 8ea80b5863cb
+Creating a thin application from: /config/app.jar
+Library cache: /opt/ol/wlp/usr/shared/resources/lib.index.cache
+Thin application: /config/dropins/spring/thinClinic.jar
+Removing intermediate container 8ea80b5863cb
+ ---> a935a129dcb2
+Successfully built a935a129dcb2
+Successfully tagged greeting:v1.0.0
+
+
    +
  1. Next, verify your newly built image
  2. +
+

The output will be as follows.

+
$ <command>
+REPOSITORY                           TAG                   IMAGE ID            CREATED             SIZE
+greeting                             v1.0.0                89bd7032fdee        51 seconds ago      537MB
+openliberty/open-liberty             springBoot2-ubi-min   bcfcb2c5ce16        6 days ago          480MB
+hello-world                          latest                f9cad508cb4c        5 months ago        1.84kB
+
+

Run the docker container

+

Now let's try running the docker container. Run it with the following parameters:

+
    +
  1. Expose port 9080. Run it in the background in detached mode. Give the container the name of greeting.
  2. +
+

Once done, you will have something like below.

+
$ <command>
+bc2dc95a6bd1f51a226b291999da9031f4443096c1462cb3fead3df36613b753
+
+

Also, docker cannot create two containers with the same name. If you try to run the same container having the same name again, you will see something like below.

+
$ <command>
+docker: Error response from daemon: Conflict. The container name "/greeting" is already in use by container "a74b91789b29af6e7be92b30d0e68eef852bfb24336a44ef1485bb58becbd664". You have to remove (or rename) that container to be able to reuse that name.
+See 'docker run --help'.
+
+

It is a good practice to name your containers. Naming helps you to discover your service easily.

+
    +
  1. List all the running containers.
  2. +
+

You will see something like below.

+
$ <command>
+CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                              NAMES
+bc2dc95a6bd1        greeting:v1.0.0     "/opt/ol/helpers/run…"   18 minutes ago      Up 18 minutes       0.0.0.0:9080->9080/tcp, 9443/tcp   greeting
+
+
    +
  1. Let's inspect the running container.
  2. +
+

By inspecting the container, you can access detailed information about the container. By using this command, you get to know the details about network settings, volumes, configs, state etc.

+

If we consider our container, it is as follows. You can see lot of information about the greeting container.

+
$ <command>
+[
+    {
+        "Id": "bc2dc95a6bd1f51a226b291999da9031f4443096c1462cb3fead3df36613b753",
+        "Created": "2019-08-30T16:56:40.2081539Z",
+        "Path": "/opt/ol/helpers/runtime/docker-server.sh",
+        "Args": [
+            "/opt/ol/wlp/bin/server",
+            "run",
+            "defaultServer"
+        ],
+        "State": {
+            "Status": "running",
+            "Running": true,
+            "Paused": false,
+            "Restarting": false,
+            "OOMKilled": false,
+            "Dead": false,
+            "Pid": 27548,
+            "ExitCode": 0,
+            "Error": "",
+            "StartedAt": "2019-08-30T16:56:41.0927889Z",
+            "FinishedAt": "0001-01-01T00:00:00Z"
+        },
+        ..........
+        ..........
+        ..........
+    }
+]
+
+
    +
  1. Get the logs of the greeting container.
  2. +
+

It helps you to access the logs of your container. It allows you to debug the container if it fails. It also lets you know what is happening with your application.

+

At the end, you will see something like below.

+
.   ____          _            __ _ _
+/\\ / ___'_ __ _ _(_)_ __  __ _ \ \ \ \
+( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
+\\/  ___)| |_)| | | | | || (_| |  ) ) ) )
+'  |____| .__|_| |_|_| |_\__, | / / / /
+=========|_|==============|___/=/_/_/_/
+:: Spring Boot ::        (v2.1.7.RELEASE)
+2019-08-30 16:57:01.494  INFO 1 --- [ecutor-thread-5] application.SBApplication                : Starting SBApplication on bc2dc95a6bd1 with PID 1 (/opt/ol/wlp/usr/servers/defaultServer/dropins/spring/thinClinic.jar started by default in /opt/ol/wlp/output/defaultServer)
+2019-08-30 16:57:01.601  INFO 1 --- [ecutor-thread-5] application.SBApplication                : No active profile set, falling back to default profiles: default
+[AUDIT   ] CWWKT0016I: Web application available (default_host): http://bc2dc95a6bd1:9080/
+2019-08-30 16:57:09.641  INFO 1 --- [cutor-thread-25] o.s.web.context.ContextLoader            : Root WebApplicationContext: initialization completed in 7672 ms
+2019-08-30 16:57:12.279  INFO 1 --- [ecutor-thread-5] o.s.b.a.e.web.EndpointLinksResolver      : Exposing 15 endpoint(s) beneath base path '/actuator'
+2019-08-30 16:57:12.974  INFO 1 --- [ecutor-thread-5] o.s.s.concurrent.ThreadPoolTaskExecutor  : Initializing ExecutorService 'applicationTaskExecutor'
+2019-08-30 16:57:13.860  INFO 1 --- [ecutor-thread-5] d.s.w.p.DocumentationPluginsBootstrapper : Context refreshed
+2019-08-30 16:57:13.961  INFO 1 --- [ecutor-thread-5] d.s.w.p.DocumentationPluginsBootstrapper : Found 1 custom documentation plugin(s)
+2019-08-30 16:57:14.020  INFO 1 --- [ecutor-thread-5] s.d.s.w.s.ApiListingReferenceScanner     : Scanning for api listing references
+2019-08-30 16:57:14.504  INFO 1 --- [ecutor-thread-5] application.SBApplication                : Started SBApplication in 17.584 seconds (JVM running for 33.368)
+[AUDIT   ] CWWKZ0001I: Application thinClinic started in 21.090 seconds.
+[AUDIT   ] CWWKF0012I: The server installed the following features: [el-3.0, jsp-2.3, servlet-4.0, springBoot-2.0, ssl-1.0, transportSecurity-1.0, websocket-1.1].
+[AUDIT   ] CWWKF0011I: The defaultServer server is ready to run a smarter planet. The defaultServer server started in 33.103 seconds.
+
+

This shows that the Spring Boot application is successfully started.

+

Access the application

+
    +
  • To access the application, open the browser and access http://localhost:9080/greeting?name=John.
  • +
+

You will see something like below.

+
{"id":2,"content":"Welcome to Cloudnative bootcamp !!! Hello, John :)"}
+
+

Container Image Registry

+

Container Image Registry is a place where you can store the container images. They can be public or private registries. They can be hosted by third party as well. In this lab, we are using Quay.

+

Pushing an image to a Registry

+

Let us now push the image to the Quay registry. Before pushing the image to the registry, one needs to login.

+
    +
  1. Login to Quay using your credentials.
  2. +
+

Once logged in, we need to tag the image for the registry.

+
    +
  1. +

    Tag your image for the image registry using the same name and tag from before. Be sure to include the host name of the target image registry in the destination tag (e.g. quay.io). NOTE: the tag command has both the source tag and repository destination tag in it.

    +
  2. +
  3. +

    Now push the image to the registry. This allows you to share images to a registry.

    +
  4. +
+

If everything goes fine, you will see something like below.

+
$ <command>
+The push refers to repository [quay.io/<repository_name>/greeting]
+2e4d09cd03a2: Pushed
+d862b7819235: Pushed
+a9212239031e: Pushed
+4be784548734: Pushed
+a43c287826a1: Mounted from library/ibmjava
+e936f9f1df3e: Mounted from library/ibmjava
+92d3f22d44f3: Mounted from library/ibmjava
+10e46f329a25: Mounted from library/ibmjava
+24ab7de5faec: Mounted from library/ibmjava
+1ea5a27b0484: Mounted from library/ibmjava
+v1.0.0: digest: sha256:21c2034646a31a18b053546df00d9ce2e0871bafcdf764f872a318a54562e6b4 size: 2415
+
+

Once the push is successful, your image will be residing in the registry.

+

Clean Up

+
    +
  1. +

    Stop the greeting container.

    +
  2. +
  3. +

    Remove the container.

    +
  4. +
  5. +

    Remove the image. (NOTE: You will need the image_id to remove it.)

    +
  6. +
+

Pulling an image from the registry

+

Sometimes, you may need the images that are residing on your registry. Or you may want to use some public images out there. Then, we need to pull the image from the registry.

+
    +
  1. Pull the image greeting from the registry.
  2. +
+

If it successfully got pulled, we will see something like below.

+
ddcb5f219ce2: Pull complete
+e3371bbd24a0: Pull complete
+49d2efb3c01b: Pull complete
+Digest: sha256:21c2034646a31a18b053546df00d9ce2e0871bafcdf764f872a318a54562e6b4
+Status: Downloaded newer image for <repository_name>/greeting:v1.0.0
+docker.io/<repository_name>/greeting:v1.0.0
+
+

Conclusion

+

You have successfully completed this lab! Let's take a look at what you learned and did today:

+
    +
  • Learned about Dockerfile.
  • +
  • Learned about docker images.
  • +
  • Learned about docker containers.
  • +
  • Learned about multi-stage docker builds.
  • +
  • Ran the Greetings service on Docker.
  • +
+

Congratulations !!!

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/devops/argocd/index.html b/labs/devops/argocd/index.html new file mode 100644 index 0000000..4f459e0 --- /dev/null +++ b/labs/devops/argocd/index.html @@ -0,0 +1,2143 @@ + + + + + + + + + + + + + + + + + + + + + ArgoCD - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

ArgoCD Lab

+
+
+
+

OpenShift

+

Pre-requisites

+

Make sure your environment is setup properly for the lab.

+

Check the Environment Setup page for your setup.

+

ArgoCD Installation

+
    +
  • Create the namespace argocd to install argocd +
    oc new-project argocd
    +
  • +
  • Install ArgoCD as follows. +
    oc apply --filename https://raw.githubusercontent.com/ibm-cloud-architecture/learning-cloudnative-101/master/static/yamls/argo-lab/argocd-operator.yaml
    +
  • +
  • When installing the tutorial, make sure you wait until the argocd-operator is finished before installing the argocd-cr..or it will fail. You can do this: +
    oc get ClusterServiceVersion -n argocd
    +NAME                                   DISPLAY                        VERSION   REPLACES   PHASE
    +argocd-operator.v0.0.8                 Argo CD                        0.0.8                Succeeded
    +
    + and wait for the "succeeded" to come up before proceeding. +
    oc apply --filename https://raw.githubusercontent.com/ibm-cloud-architecture/learning-cloudnative-101/master/static/yamls/argo-lab/argocd-cr.yaml
    +
    + and wait for the argocd server Pod to be running +
    oc get pods -n argocd -l app.kubernetes.io/name=example-argocd-server
    +
    +
    NAME                                     READY   STATUS    RESTARTS   AGE
    +example-argocd-server-57c4fd5c45-zf4q6   1/1     Running   0          115s
    +
  • +
  • Install the argocd CLI, for example on OSX use brew +
    brew tap argoproj/tap
    +brew install argoproj/tap/argocd
    +
  • +
  • Set an environment variable ARGOCD_URL using the EXTERNAL-IP +
    export ARGOCD_NAMESPACE="argocd"
    +export ARGOCD_SERVER=$(oc get route example-argocd-server -n $ARGOCD_NAMESPACE -o jsonpath='{.spec.host}')
    +export ARGOCD_URL="https://$ARGOCD_SERVER"
    +echo ARGOCD_URL=$ARGOCD_URL
    +echo ARGOCD_SERVER=$ARGOCD_SERVER
    +
  • +
+

Deploying the app

+
    +
  • Login into the UI. +
    open $ARGOCD_URL
    +
  • +
  • Use admin as the username and get the password with the following command +
    oc get secret example-argocd-cluster -n $ARGOCD_NAMESPACE -o jsonpath='{.data.admin\.password}' | base64 -d
    +
    + For example the output is similar to this: +
    tyafMb7BNvO0kP9eizx3CojrK8pYJFQq
    +
  • +
+

ArgoCD Login

+
    +
  • Now go back to the ArgoCD home and click on NEW APP.
  • +
  • Add the below details:
  • +
  • Application Name: sample
  • +
  • Project - default
  • +
  • SYNC POLICY: Manual
  • +
  • REPO URL: https://github.com/ibm-cloud-architecture/cloudnative_sample_app_deploy
  • +
  • Revision: HEAD
  • +
  • Path: openshift
  • +
+

app details one

+
    +
  • Cluster - Select the default one https://kubernetes.default.svc to deploy in-cluster
  • +
  • Namespace - default
  • +
  • Click Create to finish
  • +
+

app details two

+
    +
  • You will now see the available apps.
  • +
+

sampleapp create

+
    +
  • Initially, the app will be out of sync. It is yet to be deployed. You need to sync it for deploying.
  • +
+

To sync the application, click SYNC and then SYNCHRONIZE.

+

out of sync

+
    +
  • Wait till the app is deployed.
  • +
+

synched app

+
    +
  • Once the app is deployed, click on it to see the details.
  • +
+

sample app deployed

+

sample app full deployment

+

Verifying the deployment

+
    +
  • Access the app to verify if it is correctly deployed.
  • +
  • List the cloudnativesampleapp-service route +
    oc get route
    +
    + It should have an IP under EXTERNAL-IP column +
    NAME                 HOST/PORT                                     PATH   SERVICES                       PORT   TERMINATION   WILDCARD
    +cloudnative-sample   cloudnative-sample-default.apps-crc.testing          cloudnativesampleapp-service   9080                 None
    +
  • +
  • Set an environment variable APP_URL using the EXTERNAL-IP +
    export APP_URL="http://$(oc get route cloudnative-sample -o jsonpath='{.status.ingress[0].host}')"
    +echo ARGOCD_SERVER=$APP_URL
    +
  • +
  • Access the url using curl +
    curl "$APP_URL/greeting?name=Carlos"
    +
    +
    {"id":2,"content":"Welcome to Cloudnative bootcamp !!! Hello, Carlos :)"}
    +
  • +
+

Using the ArgoCD CLI

+
    +
  • Login using the cli.
  • +
  • Use admin as the username and get the password with the following command +
    export ARGOCD_PASSWORD=$(oc get secret example-argocd-cluster -n $ARGOCD_NAMESPACE -o jsonpath='{.data.admin\.password}' | base64 -d)
    +echo $ARGOCD_PASSWORD
    +
  • +
  • Now login as follows. +
    argocd login --username admin --password $ARGOCD_PASSWORD $ARGOCD_SERVER
    +
    +
    WARNING: server certificate had error: x509: cannot validate certificate for 10.97.240.99 because it doesn't contain 
    +any IP SANs. Proceed insecurely (y/n)? y
    +
    +'admin' logged in successfully
    +Context 'example-argocd-server-argocd.apps-crc.testing' updated
    +
  • +
  • List the applications +
    argocd app list
    +
    +
    NAME    CLUSTER                         NAMESPACE  PROJECT  STATUS  HEALTH   SYNCPOLICY  CONDITIONS  REPO                                                                     PATH   TARGET
    +sample  https://kubernetes.default.svc  default    default  Synced  Healthy  <none>      <none>      https://github.com/ibm-cloud-architecture/cloudnative_sample_app_deploy  openshift  HEAD
    +
  • +
  • Get application details +
    argocd app get sample
    +
    +
    Name:               sample
    +Project:            default
    +Server:             https://kubernetes.default.svc
    +Namespace:          default
    +URL:                https://10.97.240.99/applications/sample
    +Repo:               https://github.com/ibm-cloud-architecture/cloudnative_sample_app_deploy
    +Target:             HEAD
    +Path:               openshift
    +SyncWindow:         Sync Allowed
    +Sync Policy:        <none>
    +Sync Status:        Synced to HEAD (9684037)
    +Health Status:      Healthy
    +
    +GROUP  KIND        NAMESPACE  NAME                             STATUS  HEALTH   HOOK  MESSAGE
    +    Service     default    cloudnativesampleapp-service     Synced  Healthy        service/cloudnativesampleapp-service created
    +apps   Deployment  default    cloudnativesampleapp-deployment  Synced  Healthy        deployment.apps/cloudnativesampleapp-deployment created
    +
  • +
  • Show application deployment history +
    argocd app history sample
    +
    +
    ID  DATE                           REVISION
    +0   2020-02-12 21:10:32 -0500 EST  HEAD (9684037)
    +
  • +
+

References

+ +
+
+

Kubernetes

+

Pre-requisites

+

Make sure your environment is setup properly for the lab.

+

Check the Environment Setup page for your setup.

+

ArgoCD Installation

+
    +
  • Create the namespace argocd to install argocd +
    kubectl create namespace argocd
    +export ARGOCD_NAMESPACE=argocd
    +
  • +
  • +

    Create RBAC resources +

    kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/service_account.yaml
    +kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/role.yaml
    +kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/role_binding.yaml
    +kubectl create -n argocd -f https://raw.githubusercontent.com/ibm-cloud-architecture/learning-cloudnative-101/master/static/yamls/argo-lab/argo-clusteradmin.yaml
    +

    +
  • +
  • +

    Install CRDs +

    kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/argo-cd/argoproj.io_applications_crd.yaml
    +kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/argo-cd/argoproj.io_appprojects_crd.yaml
    +kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/crds/argoproj.io_argocdexports_crd.yaml
    +kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/crds/argoproj.io_argocds_crd.yaml
    +
    + Verify CRDs +
    kubectl get crd -n argocd
    +
    +
    NAME                        CREATED AT
    +applications.argoproj.io    2020-05-15T02:05:55Z
    +appprojects.argoproj.io     2020-05-15T02:05:56Z
    +argocdexports.argoproj.io   2020-05-15T02:08:21Z
    +argocds.argoproj.io         2020-05-15T02:08:21Z
    +

    +
  • +
  • Deploy Operator +
    kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/operator.yaml
    +
  • +
  • Deploy ArgoCD CO +
    kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/examples/argocd-lb.yaml
    +
    + Verify that ArgoCD Pods are running +
    kubectl get pods -n argocd
    +
    +
    NAME                                                     READY   STATUS    RESTARTS   AGE
    +argocd-operator-5f7d8cf7d8-486vn                         1/1     Running   0          3m46s
    +example-argocd-application-controller-7dc5fcb75d-xkk5h   1/1     Running   0          2m3s
    +example-argocd-dex-server-bb9df96cb-ndmhl                1/1     Running   0          2m3s
    +example-argocd-redis-756b6764-sb2gt                      1/1     Running   0          2m3s
    +example-argocd-repo-server-75944fcf87-zmh48              1/1     Running   0          2m3s
    +example-argocd-server-747b684c8c-xhgl9                   1/1     Running   0          2m3s
    +
    + Verify that the other ArgoCD resources are created +
    kubectl get cm,secret,svc,deploy -n argocd
    +
  • +
  • +

    List the argocd-server service +

    kubectl get svc example-argocd-server -n argocd
    +

    +
  • +
  • +

    From the script, the Argo Server service has a type of LoadBalancer. If the ExternalIP is in a pending state, then there is no loadBalancer for your cluster, so we only need the ArgoCD server's NodePort. Otherwise use the ExternalIP and NodePort to access Argo. +

    NAME                    TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)                      AGE
    +example-argocd-server   LoadBalancer   10.105.73.245   <pending>   80:31138/TCP,443:31932/TCP   5m3s
    +

    +
  • +
  • +

    To access the service we need the Node's External IP and the NodePort. Let's set an environment variable ARGOCD_URL with NODE_EXTERNAL_IP:NodePort. +

    export NODE_EXTERNAL_IP="$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="ExternalIP")].address}')"
    +export ARGOCD_NODEPORT="$(kubectl get svc example-argocd-server -n $ARGOCD_NAMESPACE -o jsonpath='{.spec.ports[0].nodePort}')"
    +export ARGOCD_URL="https://$NODE_EXTERNAL_IP:$ARGOCD_NODEPORT"
    +echo ARGOCD_URL=$ARGOCD_URL
    +

    +
  • +
  • +

    If you can't access the NodePort from your computer and only http/80 then edit the argocd-server and add the flag --insecure +

    kubectl edit -n argocd deployment example-argocd-server
    +
    + Use the kube api to proxy into the argocd server using kubectl port-forward +
    kubectl port-forward service/example-argocd-server 8080:80 -n argocd
    +
    + Then you can access the argocd server locally on port 8080 http://localhost:8080

    +
  • +
+

Deploying the app

+
    +
  • Login using the Browser into the UI using $ARGOCD_URL or localhost:8080 if using port-forward
  • +
  • Use admin as the username and get the password with the following command +
    kubectl get secret example-argocd-cluster -n $ARGOCD_NAMESPACE -o jsonpath='{.data.admin\.password}' | base64 -d
    +
    + For example the output is similar to this: +
    tyafMb7BNvO0kP9eizx3CojrK8pYJFQq
    +
  • +
+

ArgoCD Login

+
    +
  • Now go back to the ArgoCD home and click on NEW APP.
  • +
  • Add the below details:
  • +
  • Application Name: sample
  • +
  • Project - default
  • +
  • SYNC POLICY: Manual
  • +
  • REPO URL: https://github.com/ibm-cloud-architecture/cloudnative_sample_app_deploy
  • +
  • Revision: HEAD
  • +
  • Path: kubernetes
  • +
+

app details one

+
    +
  • Cluster - Select the default one https://kubernetes.default.svc to deploy in-cluster
  • +
  • Namespace - default
  • +
  • Click Create to finish
  • +
+

app details two

+
    +
  • You will now see the available apps.
  • +
+

sampleapp create

+
    +
  • Initially, the app will be out of sync. It is yet to be deployed. You need to sync it for deploying.
  • +
+

To sync the application, click SYNC and then SYNCHRONIZE.

+

out of sync

+
    +
  • Wait till the app is deployed.
  • +
+

synched app

+
    +
  • Once the app is deployed, click on it to see the details.
  • +
+

sample app deployed

+

sample app full deployment

+

Verifying the deployment

+
    +
  • Access the app to verify if it is correctly deployed.
  • +
  • List the cloudnativesampleapp-service service +
    kubectl get svc cloudnativesampleapp-service
    +
    + It will have the NodePort for the application. In this case, it is 30499. +
    NAME                           TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
    +cloudnativesampleapp-service   NodePort   172.21.118.165   <none>        9080:30499/TCP   20s
    +
  • +
  • Set an environment variable APP_URL using the Node's IP and NodePort values +
    export APP_NODE_PORT="$(kubectl get svc cloudnativesampleapp-service -n default -o jsonpath='{.spec.ports[0].nodePort}')"
    +export APP_URL="$NODE_EXTERNAL_IP:$APP_NODE_PORT"
    +echo Application=$APP_URL
    +
  • +
  • Access the url using curl +
    curl "$APP_URL/greeting?name=Carlos"
    +
    +
    {"id":2,"content":"Welcome to Cloudnative bootcamp !!! Hello, Carlos :)"}
    +
  • +
+

References

+ +
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/devops/ibm-toolchain/index.html b/labs/devops/ibm-toolchain/index.html new file mode 100644 index 0000000..3fff2d7 --- /dev/null +++ b/labs/devops/ibm-toolchain/index.html @@ -0,0 +1,1944 @@ + + + + + + + + + + + + + + + + + + + + + + + IBM Cloud - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

IBM Toolchain Lab

+

By following this tutorial, you create an open toolchain that includes a Tekton-based delivery pipeline. You then use the toolchain and DevOps practices to develop a simple "Hello World" web application (app) that you deploy to the IBM Cloud Kubernetes Service.

+

Tekton is an open source, vendor-neutral, Kubernetes-native framework that you can use to build, test, and deploy apps to Kubernetes. Tekton provides a set of shared components for building continuous integration and continuous delivery (CICD) systems. As an open source project, Tekton is managed by the Continuous Delivery Foundation (CDF). The goal is to modernize continuous delivery by providing industry specifications for pipelines, workflows, and other building blocks. With Tekton, you can build, test, and deploy across cloud providers or on-premises systems by abstracting the underlying implementation details. Tekton pipelines are built in to IBM Cloud™ Continuous Delivery.

+

After you create the cluster and the toolchain, you change your app's code and push the change to the Git Repos and Issue Tracking repository (repo). When you push changes to your repo, the delivery pipeline automatically builds and deploys the code.

+

Prerequisites

+
    +
  1. You must have an IBM Cloud account. If you don't have one, sign up for a trial. The account requires an IBMid. If you don't have an IBMid, you can create one when you register.
  2. +
  3. +

    Verify the toolchains and tool integrations that are available in your region and IBM Cloud environment. A toolchain is a set of tool integrations that support development, deployment, and operations tasks.

    +
  4. +
  5. +

    You need a Kubernetes cluster and an API key. You can create them by using either the UI or the CLI. You can create from the IBM Cloud Catalog

    +
  6. +
  7. +

    Create a container registry namespace to deploy the container we are going to build. You can create it from the Container Registry UI

    +
  8. +
  9. +

    Create the API key by using the string that is provided for your key name. +

    ibmcloud iam api-key-create my-api-key
    +
    + Save the API key value that is provided by the command.

    +
  10. +
+

Create Continuous Delivery Service Instance

+
    +
  1. Open the IBM Cloud Catalog
  2. +
  3. Search for delivery
  4. +
  5. Click on Continuous Delivery + Continuous Delivery
  6. +
  7. Select Dallas Region, as the Tutorial will be using Managed Tekton Worker available in Dallas only.
  8. +
  9. Select a Plan
  10. +
  11. Click Create
  12. +
+

Create an IBM Cloud Toolchain

+

In this task, you create a toolchain and add the tools that you need for this tutorial. Before you begin, you need your API key and Kubernetes cluster name.

+
    +
  1. Open the menu in the upper-left corner and click DevOps. Click ToolChains. Click Create a toolchain. Type in the search box toolchain. Click Build Your Own Toolchain. + Build your own toolchain
  2. +
  3. On the "Build your own toolchain" page, review the default information for the toolchain settings. The toolchain's name identifies it in IBM Cloud. Each toolchain is associated with a specific region and resource group. From the menus on the page, select the region Dallas since we are going to use the Beta Managed Tekton Worker, if you use Private Workers you can use any Region. + Select_Region
  4. +
  5. Click Create. The blank toolchain is created.
  6. +
  7. Click Add a Tool and click Git Repos and Issue Tracking. + Git Repos tile
      +
    • From the Repository type list, select Clone.
    • +
    • In the Source repository URL field, type https://github.com/csantanapr/hello-tekton.git.
    • +
    • Make sure to uncheck the Make this repository private checkbox and that the Track deployment of code changes checkbox is selected. +Git window
    • +
    • Click Create Integration. Tiles for Git Issues and Git Code are added to your toolchain.
    • +
    +
  8. +
  9. Return to your toolchain's overview page.
  10. +
  11. Click Add a Tool. Type pipeline in the search box and click Delivery Pipeline. + Add Tool Delivery Pipeline
      +
    • Type a name for your new pipeline.
    • +
    • Click Tekton. +Pipeline type
    • +
    • Make sure that the Show apps in the View app menu checkbox is selected. All the apps that your pipeline creates are shown in the View App list on the toolchain's Overview page.
    • +
    • Click Create Integration to add the Delivery Pipeline to your toolchain.
    • +
    +
  12. +
  13. Click Delivery Pipeline to open the Tekton Delivery Pipeline dashboard. Click the Definitions tab and complete these tasks:
  14. +
  15. Click Add to add your repository.
  16. +
  17. Specify the Git repo and URL that contains the Tekton pipeline definition and related artifacts. From the list, select the Git repo that you created earlier.
  18. +
  19. Select the branch in your Git repo that you want to use. For this tutorial, use the default value.
  20. +
  21. Specify the directory path to your pipeline definition within the Git repo. You can reference a specific definition within the same repo. For this tutorial, use the default value. + Pipeline window
  22. +
  23. Click Add, then click Save
  24. +
  25. Click the Worker tab and select the private worker that you want to use to run your Tekton pipeline on the associated cluster. Either select the private worker you set up in the previous steps, or select the IBM Managed workers in DALLAS option. + Worker tab
  26. +
  27. Click Save
  28. +
  29. Click the Triggers tab, click Add trigger, and click Git Repository. Associate the trigger with an event listener:
  30. +
  31. From the Repository list, select your repo.
  32. +
  33. Select the When a commit is pushed checkbox, and in the EventListener field, make sure that listener is selected. +Git Repository trigger
  34. +
  35. Click Save
  36. +
  37. On the Triggers tab, click Add trigger and click Manual. Associate that trigger with an event listener:
  38. +
  39. In the EventListener field, make sure that listener is selected.
  40. +
  41. Click Save. + Manual trigger + Note: Manual triggers run when you click Run pipeline and select the trigger. Git repository triggers run when the specified Git event type occurs for the specified Git repo and branch. The list of available event listeners is populated with the listeners that are defined in the pipeline code repo.
  42. +
  43. Click the Environment properties tab and define the environment properties for this tutorial. To add each property, click Add property and click Text property. Add these properties:
  44. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequired?Description
apikeyrequiredType the API key that you created earlier in this tutorial.
clusterOptional (cluster)Type the name of the Kubernetes cluster that you created.
registryNamespacerequiredType the IBM Image Registry namespace where the app image will be built and stored. To use an existing namespace, use the CLI and run ibmcloud cr namespace-list to identify all your current namespaces
repositoryrequiredType the source Git repository where your resources are stored. This value is the URL of the Git repository that you created earlier in this tutorial. To find your repo URL, return to your toolchain and click the Git tile. When the repository is shown, copy the URL.
revisionOptional (master)The Git branch
clusterRegionOptional (us-south)Type the region where your cluster is located.
clusterNamespaceOptional (prod)The namespace in your cluster where the app will be deployed.
registryRegionOptional (us-south)The region where your Image registry is located. To find your registry region, use the CLI and run ibmcloud cr region.
+

Environment properties +12. Click Save

+

Explore the pipeline

+

With a Tekton-based delivery pipeline, you can automate the continuous building, testing, and deployment of your apps.

+

The Tekton Delivery Pipeline dashboard displays an empty table until at least one Tekton pipeline runs. After a Tekton pipeline runs, either manually or as the result of external Git events, the table lists the run, its status, and the last updated time of the run definition.

+

To run the manual trigger that you set up in the previous task, click Run pipeline and select the name of the manual trigger that you created. The pipeline starts to run and you can see the progress on the dashboard. Pipeline runs can be in any of the following states:

+
    +
  • Pending: The PipelineRun definition is queued and waiting to run.
  • +
  • Running: The PipelineRun definition is running in the cluster.
  • +
  • Succeeded: The PipelineRun definition was successfully completed in the cluster.
  • +
  • +

    Failed: The PipelineRun definition run failed. Review the log file for the run to determine the cause. + Pipeline dashboard

    +
  • +
  • +

    For more information about a selected run, click any row in the table. You view the Task definition and the steps in each PipelineRun definition. You can also view the status, logs, and details of each Task definition and step, and the overall status of the PipelineRun definition. + Pipeline Log

    +
  • +
  • +

    The pipeline definition is stored in the pipeline.yaml file in the .tekton folder of your Git repository. Each task has a separate section of this file. The steps for each task are defined in the tasks.yaml file.

    +
  • +
  • +

    Review the pipeline-build-task. The task consists of a git clone of the repository followed by two steps:

    +
      +
    • pre-build-check: This step checks for the mandatory Dockerfile and runs a lint tool. It then checks the registry current plan and quota before it creates the image registry namespace if needed.
    • +
    • build-docker-image: This step creates the Docker image by using the IBM Cloud Container Registry build service through the ibmcloud cr build CLI script.
    • +
    +
  • +
  • Review the pipeline-validate-task. The task consists of a git clone of the repository, followed by the check-vulnerabilities step. This step runs the IBM Cloud Vulnerability Advisor on the image to check for known vulnerabilities. If it finds a vulnerability, the job fails, preventing the image from being deployed. This safety feature prevents apps with security holes from being deployed. The image has no vulnerabilities, so it passes. In this tutorial template, the default configuration of the job is to not block on failure.
  • +
  • Review the pipeline-deploy-task. The task consists of a git clone of the repository followed by two steps:
      +
    • pre-deploy-check: This step checks whether the IBM Container Service cluster is ready and has a namespace that is configured with access to the private image registry by using an IBM Cloud API Key.
    • +
    • deploy-to-kubernetes: This step updates the deployment.yml manifest file with the image url and deploys the application using kubectl apply
    • +
    +
  • +
  • After all the steps in the pipeline are completed, a green status is shown for each task. Click the deploy-to-kubernetes step and click the Logs tab to see the successful completion of this step. + Pipeline success
  • +
  • Scroll to the end of the log. The DEPLOYMENT SUCCEEDED message is shown at the end of the log. + Deployment succeeded
  • +
  • Click the URL to see the running application. + Running app
  • +
+

Modify the App Code

+

In this task, you modify the application and redeploy it. You can see how your Tekton-based delivery pipeline automatically picks up the changes in the application on commit and redeploys the app.

+
    +
  1. On the toolchain's Overview page, click the Git tile for your application.
      +
    • Tip: You can also use the built-in Eclipse Orion-based Web IDE, a local IDE, or your favorite editor to change the files in your repo.
    • +
    +
  2. +
  3. In the repository directory tree, open the app.js file. + File browser
  4. +
  5. Edit the text message code to change the welcome message. + Edit file
  6. +
  7. Commit the updated file by typing a commit message and clicking Commit changes to push the change to the project's remote repository.
  8. +
  9. Return to the toolchain's Overview page by clicking the back arrow.
  10. +
  11. Click Delivery Pipeline. The pipeline is running because the commit automatically started a build. Over the next few minutes, watch your change as it is built, tested, and deployed. + Dashboard redeployment
  12. +
  13. After the deploy-to-kubernetes step is completed, refresh your application URL. The updated message is shown.
  14. +
+

Clean up Resources

+

In this task, you can remove any of the content that is generated by this tutorial. Before you begin, you need the IBM Cloud CLI and the IBM Cloud Kubernetes Service CLI. Instructions to install the CLI are in the prerequisite section of this tutorial.

+
    +
  1. Delete the git repository, sign in into git, select personal projects. Then go to repository General settings and remove the repository.
  2. +
  3. Delete the toolchain. You can delete a toolchain and specify which of the associated tool integrations you want to delete. When you delete a toolchain, the deletion is permanent.
      +
    • On the DevOps dashboard, on the Toolchains page, click the toolchain to delete. Alternatively, on the app's Overview page, on the Continuous delivery card, click View Toolchain.
    • +
    • Click the More Actions menu, which is next to View app.
    • +
    • Click Delete. Deleting a toolchain removes all of its tool integrations, which might delete resources that are managed by those integrations.
    • +
    • Confirm the deletion by typing the name of the toolchain and clicking Delete.
    • +
    • Tip: When you delete a GitHub, GitHub Enterprise, or Git Repos and Issue Tracking tool integration, the associated repo isn't deleted from GitHub, GitHub Enterprise, or Git Repos and Issue Tracking. You must manually remove the repo.
    • +
    +
  4. +
  5. Delete the cluster or discard the namespace from it. It is easiest to delete the entire namespace (Please do not delete the default namespace) by using the IBM Cloud™ Kubernetes Service CLI from a command-line window. However, if you have other resources that you need to keep in the namespace, you need to delete the application resources individually instead of the entire namespace. To delete the entire namespace, enter this command: +
    kubectl delete namespace [not-the-default-namespace]
    +
  6. +
  7. Delete your IBM Cloud API key.
  8. +
  9. From the Manage menu, click Access (IAM). Click IBM Cloud API Keys.
  10. +
  11. Find your API Key in the list and select Delete from the menu to the right of the API Key name.
  12. +
  13. Delete the container images. To delete the images in your container image registry, enter this command in a command-line window: +
    ibmcloud cr image-rm IMAGE [IMAGE...]
    +
    + If you created a registry namespace for the tutorial, delete the entire registry namespace by entering this command: +
    ibmcloud cr namespace-rm NAMESPACE
    +
      +
    • Note: You can run this tutorial many times by using the same registry namespace and cluster parameters without discarding previously generated resources. The generated resources use randomized names to avoid conflicts.
    • +
    +
  14. +
+

Summary

+

You created a toolchain with a Tekton-based delivery pipeline that deploys a "Hello World" app to a secure container in a Kubernetes cluster. You changed a message in the app and tested your change. When you pushed the change to the repo, the delivery pipeline automatically redeployed the app.

+ + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/devops/images/Add_Tool_DP.png b/labs/devops/images/Add_Tool_DP.png new file mode 100644 index 0000000..182c360 Binary files /dev/null and b/labs/devops/images/Add_Tool_DP.png differ diff --git a/labs/devops/images/Add_Tool_Git.png b/labs/devops/images/Add_Tool_Git.png new file mode 100644 index 0000000..edb4559 Binary files /dev/null and b/labs/devops/images/Add_Tool_Git.png differ diff --git a/labs/devops/images/Blank_Template.png b/labs/devops/images/Blank_Template.png new file mode 100644 index 0000000..114a57a Binary files /dev/null and b/labs/devops/images/Blank_Template.png differ diff --git a/labs/devops/images/Continuous_Delivery.png b/labs/devops/images/Continuous_Delivery.png new file mode 100644 index 0000000..a204d4b Binary files /dev/null and b/labs/devops/images/Continuous_Delivery.png differ diff --git a/labs/devops/images/Jenkins_Credentials.png b/labs/devops/images/Jenkins_Credentials.png new file mode 100644 index 0000000..a3bd9c4 Binary files /dev/null and b/labs/devops/images/Jenkins_Credentials.png differ diff --git a/labs/devops/images/Jenkins_IKS_Pipeline_Build.png b/labs/devops/images/Jenkins_IKS_Pipeline_Build.png new file mode 100644 index 0000000..b37cce0 Binary files /dev/null and b/labs/devops/images/Jenkins_IKS_Pipeline_Build.png differ diff --git a/labs/devops/images/Jenkins_IKS_Pipeline_BuildNow.png b/labs/devops/images/Jenkins_IKS_Pipeline_BuildNow.png new file mode 100644 index 0000000..a21adba Binary files /dev/null and b/labs/devops/images/Jenkins_IKS_Pipeline_BuildNow.png differ diff --git a/labs/devops/images/Jenkins_IKS_Pipeline_details.png b/labs/devops/images/Jenkins_IKS_Pipeline_details.png new file mode 100644 index 0000000..65bb6ad Binary files /dev/null and b/labs/devops/images/Jenkins_IKS_Pipeline_details.png differ diff --git a/labs/devops/images/Jenkins_IKS_credentials.png b/labs/devops/images/Jenkins_IKS_credentials.png new file mode 100644 index 
0000000..0997af0 Binary files /dev/null and b/labs/devops/images/Jenkins_IKS_credentials.png differ diff --git a/labs/devops/images/Jenkins_IKS_home.png b/labs/devops/images/Jenkins_IKS_home.png new file mode 100644 index 0000000..eed9c06 Binary files /dev/null and b/labs/devops/images/Jenkins_IKS_home.png differ diff --git a/labs/devops/images/Jenkins_IKS_newitem.png b/labs/devops/images/Jenkins_IKS_newitem.png new file mode 100644 index 0000000..cb8fe2d Binary files /dev/null and b/labs/devops/images/Jenkins_IKS_newitem.png differ diff --git a/labs/devops/images/Jenkins_IKS_parameter.png b/labs/devops/images/Jenkins_IKS_parameter.png new file mode 100644 index 0000000..861f71a Binary files /dev/null and b/labs/devops/images/Jenkins_IKS_parameter.png differ diff --git a/labs/devops/images/Jenkins_IKS_pipeline_creation.png b/labs/devops/images/Jenkins_IKS_pipeline_creation.png new file mode 100644 index 0000000..1b63e94 Binary files /dev/null and b/labs/devops/images/Jenkins_IKS_pipeline_creation.png differ diff --git a/labs/devops/images/Jenkins_Pipeline_Build.png b/labs/devops/images/Jenkins_Pipeline_Build.png new file mode 100644 index 0000000..1f48d8b Binary files /dev/null and b/labs/devops/images/Jenkins_Pipeline_Build.png differ diff --git a/labs/devops/images/Jenkins_Pipeline_BuildNow.png b/labs/devops/images/Jenkins_Pipeline_BuildNow.png new file mode 100644 index 0000000..ee82cac Binary files /dev/null and b/labs/devops/images/Jenkins_Pipeline_BuildNow.png differ diff --git a/labs/devops/images/Jenkins_Pipeline_setup.png b/labs/devops/images/Jenkins_Pipeline_setup.png new file mode 100644 index 0000000..d0aca61 Binary files /dev/null and b/labs/devops/images/Jenkins_Pipeline_setup.png differ diff --git a/labs/devops/images/Jenkins_add_creds.png b/labs/devops/images/Jenkins_add_creds.png new file mode 100644 index 0000000..6b9e6ca Binary files /dev/null and b/labs/devops/images/Jenkins_add_creds.png differ diff --git 
a/labs/devops/images/Jenkins_all_secrets.png b/labs/devops/images/Jenkins_all_secrets.png new file mode 100644 index 0000000..f5cabd2 Binary files /dev/null and b/labs/devops/images/Jenkins_all_secrets.png differ diff --git a/labs/devops/images/Jenkins_creds_global.png b/labs/devops/images/Jenkins_creds_global.png new file mode 100644 index 0000000..19b1a0a Binary files /dev/null and b/labs/devops/images/Jenkins_creds_global.png differ diff --git a/labs/devops/images/Jenkins_ephemeral.png b/labs/devops/images/Jenkins_ephemeral.png new file mode 100644 index 0000000..894e97b Binary files /dev/null and b/labs/devops/images/Jenkins_ephemeral.png differ diff --git a/labs/devops/images/Jenkins_ephemeral_details_one.png b/labs/devops/images/Jenkins_ephemeral_details_one.png new file mode 100644 index 0000000..b2a8123 Binary files /dev/null and b/labs/devops/images/Jenkins_ephemeral_details_one.png differ diff --git a/labs/devops/images/Jenkins_ephemeral_details_three.png b/labs/devops/images/Jenkins_ephemeral_details_three.png new file mode 100644 index 0000000..f82ad8d Binary files /dev/null and b/labs/devops/images/Jenkins_ephemeral_details_three.png differ diff --git a/labs/devops/images/Jenkins_ephemeral_details_two.png b/labs/devops/images/Jenkins_ephemeral_details_two.png new file mode 100644 index 0000000..a3673ff Binary files /dev/null and b/labs/devops/images/Jenkins_ephemeral_details_two.png differ diff --git a/labs/devops/images/Jenkins_global_cred_creation.png b/labs/devops/images/Jenkins_global_cred_creation.png new file mode 100644 index 0000000..f76e180 Binary files /dev/null and b/labs/devops/images/Jenkins_global_cred_creation.png differ diff --git a/labs/devops/images/Jenkins_iks_login.png b/labs/devops/images/Jenkins_iks_login.png new file mode 100644 index 0000000..bd0d9e2 Binary files /dev/null and b/labs/devops/images/Jenkins_iks_login.png differ diff --git a/labs/devops/images/Jenkins_modify_deploy_repo.png 
b/labs/devops/images/Jenkins_modify_deploy_repo.png new file mode 100644 index 0000000..9ae34d9 Binary files /dev/null and b/labs/devops/images/Jenkins_modify_deploy_repo.png differ diff --git a/labs/devops/images/Jenkins_oc_login.png b/labs/devops/images/Jenkins_oc_login.png new file mode 100644 index 0000000..f2e5a83 Binary files /dev/null and b/labs/devops/images/Jenkins_oc_login.png differ diff --git a/labs/devops/images/Jenkins_oc_permissions.png b/labs/devops/images/Jenkins_oc_permissions.png new file mode 100644 index 0000000..a6355fc Binary files /dev/null and b/labs/devops/images/Jenkins_oc_permissions.png differ diff --git a/labs/devops/images/Jenkins_oc_ui.png b/labs/devops/images/Jenkins_oc_ui.png new file mode 100644 index 0000000..d369e20 Binary files /dev/null and b/labs/devops/images/Jenkins_oc_ui.png differ diff --git a/labs/devops/images/Jenkins_oc_url.png b/labs/devops/images/Jenkins_oc_url.png new file mode 100644 index 0000000..bbe5b62 Binary files /dev/null and b/labs/devops/images/Jenkins_oc_url.png differ diff --git a/labs/devops/images/Jenkins_pipeline_app_details.png b/labs/devops/images/Jenkins_pipeline_app_details.png new file mode 100644 index 0000000..1201baa Binary files /dev/null and b/labs/devops/images/Jenkins_pipeline_app_details.png differ diff --git a/labs/devops/images/Jenkins_pipeline_creation.png b/labs/devops/images/Jenkins_pipeline_creation.png new file mode 100644 index 0000000..7e3920c Binary files /dev/null and b/labs/devops/images/Jenkins_pipeline_creation.png differ diff --git a/labs/devops/images/Jenkins_secret_creation.png b/labs/devops/images/Jenkins_secret_creation.png new file mode 100644 index 0000000..bf3563c Binary files /dev/null and b/labs/devops/images/Jenkins_secret_creation.png differ diff --git a/labs/devops/images/Jenkins_secrets.png b/labs/devops/images/Jenkins_secrets.png new file mode 100644 index 0000000..17efeac Binary files /dev/null and b/labs/devops/images/Jenkins_secrets.png differ diff --git 
a/labs/devops/images/Pipeline_Dashboard.png b/labs/devops/images/Pipeline_Dashboard.png new file mode 100644 index 0000000..c08e451 Binary files /dev/null and b/labs/devops/images/Pipeline_Dashboard.png differ diff --git a/labs/devops/images/Pipeline_Details.png b/labs/devops/images/Pipeline_Details.png new file mode 100644 index 0000000..2e6771d Binary files /dev/null and b/labs/devops/images/Pipeline_Details.png differ diff --git a/labs/devops/images/Region_Select.png b/labs/devops/images/Region_Select.png new file mode 100644 index 0000000..51de3c5 Binary files /dev/null and b/labs/devops/images/Region_Select.png differ diff --git a/labs/devops/images/Tekton_App.png b/labs/devops/images/Tekton_App.png new file mode 100644 index 0000000..dd30488 Binary files /dev/null and b/labs/devops/images/Tekton_App.png differ diff --git a/labs/devops/images/Tekton_Commit.png b/labs/devops/images/Tekton_Commit.png new file mode 100644 index 0000000..3e21d69 Binary files /dev/null and b/labs/devops/images/Tekton_Commit.png differ diff --git a/labs/devops/images/Tekton_Deployment_Success.png b/labs/devops/images/Tekton_Deployment_Success.png new file mode 100644 index 0000000..19407a0 Binary files /dev/null and b/labs/devops/images/Tekton_Deployment_Success.png differ diff --git a/labs/devops/images/Tekton_Environment.png b/labs/devops/images/Tekton_Environment.png new file mode 100644 index 0000000..615c12a Binary files /dev/null and b/labs/devops/images/Tekton_Environment.png differ diff --git a/labs/devops/images/Tekton_Files.png b/labs/devops/images/Tekton_Files.png new file mode 100644 index 0000000..af29c18 Binary files /dev/null and b/labs/devops/images/Tekton_Files.png differ diff --git a/labs/devops/images/Tekton_Git_Setup.png b/labs/devops/images/Tekton_Git_Setup.png new file mode 100644 index 0000000..861ae00 Binary files /dev/null and b/labs/devops/images/Tekton_Git_Setup.png differ diff --git a/labs/devops/images/Tekton_Manual_Trigger.png 
b/labs/devops/images/Tekton_Manual_Trigger.png new file mode 100644 index 0000000..ac40c63 Binary files /dev/null and b/labs/devops/images/Tekton_Manual_Trigger.png differ diff --git a/labs/devops/images/Tekton_Redeploy.png b/labs/devops/images/Tekton_Redeploy.png new file mode 100644 index 0000000..c331489 Binary files /dev/null and b/labs/devops/images/Tekton_Redeploy.png differ diff --git a/labs/devops/images/Tekton_Repo_Definition.png b/labs/devops/images/Tekton_Repo_Definition.png new file mode 100644 index 0000000..47cbf59 Binary files /dev/null and b/labs/devops/images/Tekton_Repo_Definition.png differ diff --git a/labs/devops/images/Tekton_Select.png b/labs/devops/images/Tekton_Select.png new file mode 100644 index 0000000..da36228 Binary files /dev/null and b/labs/devops/images/Tekton_Select.png differ diff --git a/labs/devops/images/Tekton_Success.png b/labs/devops/images/Tekton_Success.png new file mode 100644 index 0000000..c5c9d5f Binary files /dev/null and b/labs/devops/images/Tekton_Success.png differ diff --git a/labs/devops/images/Tekton_Trigger.png b/labs/devops/images/Tekton_Trigger.png new file mode 100644 index 0000000..0cf1f7f Binary files /dev/null and b/labs/devops/images/Tekton_Trigger.png differ diff --git a/labs/devops/images/Tekton_Worker.png b/labs/devops/images/Tekton_Worker.png new file mode 100644 index 0000000..a88b590 Binary files /dev/null and b/labs/devops/images/Tekton_Worker.png differ diff --git a/labs/devops/images/add_repo_argo.png b/labs/devops/images/add_repo_argo.png new file mode 100644 index 0000000..7a599e5 Binary files /dev/null and b/labs/devops/images/add_repo_argo.png differ diff --git a/labs/devops/images/app_argo_1.png b/labs/devops/images/app_argo_1.png new file mode 100644 index 0000000..8ce506d Binary files /dev/null and b/labs/devops/images/app_argo_1.png differ diff --git a/labs/devops/images/app_argo_2.png b/labs/devops/images/app_argo_2.png new file mode 100644 index 0000000..eaedaca Binary files /dev/null 
and b/labs/devops/images/app_argo_2.png differ diff --git a/labs/devops/images/argocd_login.png b/labs/devops/images/argocd_login.png new file mode 100644 index 0000000..d580f13 Binary files /dev/null and b/labs/devops/images/argocd_login.png differ diff --git a/labs/devops/images/create_project_oc.png b/labs/devops/images/create_project_oc.png new file mode 100644 index 0000000..7608a02 Binary files /dev/null and b/labs/devops/images/create_project_oc.png differ diff --git a/labs/devops/images/ibmcloud_oc_cluster.png b/labs/devops/images/ibmcloud_oc_cluster.png new file mode 100644 index 0000000..7ac2562 Binary files /dev/null and b/labs/devops/images/ibmcloud_oc_cluster.png differ diff --git a/labs/devops/images/manage_repo_argocd.png b/labs/devops/images/manage_repo_argocd.png new file mode 100644 index 0000000..c16c544 Binary files /dev/null and b/labs/devops/images/manage_repo_argocd.png differ diff --git a/labs/devops/images/openshift_console.png b/labs/devops/images/openshift_console.png new file mode 100644 index 0000000..ca210f4 Binary files /dev/null and b/labs/devops/images/openshift_console.png differ diff --git a/labs/devops/images/out_of_sync.png b/labs/devops/images/out_of_sync.png new file mode 100644 index 0000000..8c8eaf3 Binary files /dev/null and b/labs/devops/images/out_of_sync.png differ diff --git a/labs/devops/images/sampl_app_output.png b/labs/devops/images/sampl_app_output.png new file mode 100644 index 0000000..11542a6 Binary files /dev/null and b/labs/devops/images/sampl_app_output.png differ diff --git a/labs/devops/images/sample_app_deployed.png b/labs/devops/images/sample_app_deployed.png new file mode 100644 index 0000000..c3049d6 Binary files /dev/null and b/labs/devops/images/sample_app_deployed.png differ diff --git a/labs/devops/images/sample_app_full_deployment.png b/labs/devops/images/sample_app_full_deployment.png new file mode 100644 index 0000000..33c9ad8 Binary files /dev/null and 
b/labs/devops/images/sample_app_full_deployment.png differ diff --git a/labs/devops/images/sampleapp_create.png b/labs/devops/images/sampleapp_create.png new file mode 100644 index 0000000..7395889 Binary files /dev/null and b/labs/devops/images/sampleapp_create.png differ diff --git a/labs/devops/images/search_jenkins.png b/labs/devops/images/search_jenkins.png new file mode 100644 index 0000000..c3f6d26 Binary files /dev/null and b/labs/devops/images/search_jenkins.png differ diff --git a/labs/devops/images/synched_app.png b/labs/devops/images/synched_app.png new file mode 100644 index 0000000..96cb87f Binary files /dev/null and b/labs/devops/images/synched_app.png differ diff --git a/labs/devops/jenkins/index.html b/labs/devops/jenkins/index.html new file mode 100644 index 0000000..3370085 --- /dev/null +++ b/labs/devops/jenkins/index.html @@ -0,0 +1,2337 @@ + + + + + + + + + + + + + + + + + + + + + + + Jenkins - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Jenkins Lab

+
+
+
+

Introduction

+

In this lab, you will learn about how to define Continuous Integration for your application. We are using Jenkins to define it.

+

Jenkins

+

Jenkins is a popular open source Continuous Integration tool. It is built in Java. It allows the developers to perform continuous integration and build automation. It allows you to define steps and executes them based on the instructions like building the application using build tools like Ant, Gradle, Maven etc, executing shell scripts, running tests etc. All the steps can be executed based on the timing or event. It depends on the setup. It helps to monitor all these steps and sends notifications to the team members in case of failures. Also, it is very flexible and has a large plugin list which one can easily add to based on their requirements.

+

Check these guides out if you want to know more about Jenkins - Jenkins, Leading open source automation server.

+

Prerequisites

+
    +
  • You need an IBM cloud account.
  • +
  • Create kubernetes cluster using IBM Cloud Kubernetes Service. Here, you can choose an openshift cluster.
  • +
  • Install oc command line tool.
  • +
  • You should be familiar with basics like Containers, Docker, Kubernetes.
  • +
+

Continuous Integration

+

Install Jenkins

+
    +
  • Open the IBM Cloud Openshift cluster.
  • +
+

OC Cluster

+
    +
  • Click on the OpenShift web console tab and this will take you to openshift UI.
  • +
+

OC Cluster

+
    +
  • Create a new project.
  • +
+

OC Cluster

+
    +
  • Search for Jenkins.
  • +
+

Search Jenkins

+
    +
  • Choose Jenkins (Ephemeral).
  • +
+

OC Cluster

+
    +
  • Install it.
  • +
+

OC Cluster

+

OC Cluster

+

OC Cluster

+
    +
  • Wait till the Jenkins installs and the pods are ready.
  • +
+

OC Cluster

+
    +
  • Once, it is ready you can access the Jenkins by clicking the link.
  • +
+

OC Cluster

+

Now, click on Log in with OpenShift.

+
    +
  • When you get logged in, you will see the below screen. Click Allow selected permissions.
  • +
+

OC Cluster

+
    +
  • You will be able to access the Jenkins UI now.
  • +
+

OC Cluster

+

Get the Sample App

+
    +
  • Fork the below repository.
  • +
+
https://github.com/ibm-cloud-architecture/cloudnative_sample_app
+
+
    +
  • Clone the forked repository.
  • +
+
$ git clone https://github.com/(user)/cloudnative_sample_app.git
+
+

Jenkinsfile

+

Before setting up the CI pipeline, let us first have a look at our Jenkinsfile and understand the stages here.

+

Open your Jenkinsfile or you can also access it https://github.com/ibm-cloud-architecture/cloudnative_sample_app/blob/master/Jenkinsfile[here].

+

In our Jenkins file, we have five stages.

+
    +
  • Local - Build
  • +
+

In this stage, we are building the application and packaging it using maven.

+
    +
  • Local - Test
  • +
+

In this stage, we are making sure all the unit tests run fine by running maven test.

+
    +
  • Local - Run
  • +
+

In this stage, we are running the application using the previous build and verifying the application by performing health and API checks.

+
    +
  • +

    Build and Push Image

    +
  • +
  • +

    We are logging in to the IBM Cloud and accessing the IBM Cloud Container Registry.

    +
  • +
  • We are also creating a namespace if not present.
  • +
  • We are building the image using ibmcloud cli tools.
  • +
  • Once the image is built, it is pushed into the container registry.
  • +
+

In this stage, we are building the docker image and pushing it to the registry.

+
    +
  • +

    Push to Deploy repo

    +
  • +
  • +

    Initially, we are cloning the deploy repository.

    +
  • +
  • Changing the image tag to the one we previously built and pushed.
  • +
  • Pushing these new changes to the deploy repository.
  • +
+

In this stage, we are pushing the new artifact tag to the deploy repository which will later be used by the Continuous Delivery system.

+

Jenkins Credentials

+

Let us now build all the credentials required by the pipeline.

+
    +
  • In the Jenkins home page, click on Credentials.
  • +
+

OC Cluster

+
    +
  • In the Credentials page, click on Jenkins.
  • +
+

OC Cluster

+
    +
  • Now, click on Global Credentials (UnRestricted).
  • +
+

OC Cluster

+
    +
  • Click on Add Credentials to create the ones required for this lab.
  • +
+

image::Jenkins_add_creds.png[align="center"] +OC Cluster

+
    +
  • Now create a secret as follows.
  • +
+

Kind : Secret Text +Secret: (Your container registry url, for eg., us.icr.io) +ID: registry_url

+

OC Cluster

+

Once created, you will see something like below.

+

OC Cluster

+

Similarly create the rest of the credentials as well.

+
+

Kind : Secret Text +Secret: (Your registry namespace, for eg., catalyst_cloudnative) +ID: registry_namespace

+

Kind : Secret Text +Secret: (Your IBM cloud region, for eg., us-east) +ID: ibm_cloud_region

+

Kind : Secret Text +Secret: (Your IBM Cloud API key) +ID: ibm_cloud_api_key

+

Kind : Secret Text +Secret: (Your Github Username) +ID: git-account

+

Kind : Secret Text +Secret: (Your Github Token) +ID: github-token

+
+

Once all of them are created, you will have the list as follows.

+

OC Cluster

+

Jenkins Pipeline

+
    +
  • Create a new pipeline. Go to Jenkins -> Click on New Item.
  • +
+

OC Cluster

+
    +
  • Enter the name of the application, choose Pipeline and click OK.
  • +
+

OC Cluster

+
    +
  • +

    Now go to the Pipeline tab and enter the details of the repository.

    +
  • +
  • +

    In the Definition, choose Pipeline script from SCM.

    +
  • +
  • Mention SCM as Git.
  • +
  • Enter the repository URL in Repository URL.
  • +
  • Specify master as the branch to build.
  • +
  • Save this information.
  • +
+

OC Cluster

+
    +
  • To initiate a build, click Build Now.
  • +
+

OC Cluster

+
    +
  • Once the build is successful, you will see something like below.
  • +
+

OC Cluster

+

After this build is done, your deploy repository will be updated by the Jenkins.

+

OC Cluster

+
+
+

Introduction

+

In this lab, you will learn about how to define Continuous Integration for your application. We are using https://jenkins.io/[Jenkins] to define it.

+

Jenkins

+

Jenkins is a popular open source Continuous Integration tool. It is built in Java. It allows the developers to perform continuous integration and build automation. It allows you to define steps and executes them based on the instructions like building the application using build tools like Ant, Gradle, Maven etc, executing shell scripts, running tests etc. All the steps can be executed based on the timing or event. It depends on the setup. It helps to monitor all these steps and sends notifications to the team members in case of failures. Also, it is very flexible and has a large plugin list which one can easily add to based on their requirements.

+

Check these guides out if you want to know more about Jenkins - https://jenkins.io/doc/[Jenkins, Leading open source automation server].

+

Prerequisites

+
    +
  • You need an https://cloud.ibm.com/login[IBM cloud account].
  • +
  • Create kubernetes cluster using https://cloud.ibm.com/docs/containers?topic=containers-getting-started[IBM Cloud Kubernetes Service]. Here, you can choose a kubernetes cluster.
  • +
  • Install https://kubernetes.io/docs/tasks/tools/install-kubectl/[kubectl] command line tool.
  • +
  • You should be familiar with basics like Containers, Docker, Kubernetes.
  • +
+

Continuous Integration

+

Install Jenkins

+
    +
  • Initially log in to your IBM Cloud account as follows.
  • +
+
$ ibmcloud login -a cloud.ibm.com -r (region) -g (cluster_name)
+
+

And then download the Kube config files as below.

+
$ ibmcloud ks cluster-config --cluster (cluster_name)
+
+

You can also get the access instructions in IBM Cloud Dashboard -> Kubernetes Clusters -> Click on your Cluster -> Click on Access Tab.

+
    +
  • Install Jenkins using helm using the below command. We are not using persistence in this lab.
  • +
+
$ helm install --name cloudnative-jenkins --set persistence.enabled=false stable/jenkins
+
+

If it is successfully executed, you will see something like below.

+
$ helm install --name cloudnative-jenkins --set persistence.enabled=false stable/jenkins
+NAME:   cloudnative
+LAST DEPLOYED: Wed Aug  7 16:22:55 2019
+NAMESPACE: default
+STATUS: DEPLOYED
+
+RESOURCES:
+==> v1/ConfigMap
+NAME                       DATA  AGE
+cloudnative-jenkins        5     1s
+cloudnative-jenkins-tests  1     1s
+
+==> v1/Deployment
+NAME                 READY  UP-TO-DATE  AVAILABLE  AGE
+cloudnative-jenkins  0/1    1           0          1s
+
+==> v1/Pod(related)
+NAME                                  READY  STATUS    RESTARTS  AGE
+cloudnative-jenkins-57588c86c7-hxqmq  0/1    Init:0/1  0         0s
+
+==> v1/Role
+NAME                                 AGE
+cloudnative-jenkins-schedule-agents  1s
+
+==> v1/RoleBinding
+NAME                                 AGE
+cloudnative-jenkins-schedule-agents  1s
+
+==> v1/Secret
+NAME                 TYPE    DATA  AGE
+cloudnative-jenkins  Opaque  2     1s
+
+==> v1/Service
+NAME                       TYPE          CLUSTER-IP      EXTERNAL-IP     PORT(S)         AGE
+cloudnative-jenkins        LoadBalancer  172.21.143.35   169.63.132.124  8080:32172/TCP  1s
+cloudnative-jenkins-agent  ClusterIP     172.21.206.235  (none>          50000/TCP       1s
+
+==> v1/ServiceAccount
+NAME                 SECRETS  AGE
+cloudnative-jenkins  1        1s
+
+

Use the following steps to open Jenkins UI and login.

+
NOTES:
+1. Get your 'admin' user password by running:
+printf $(kubectl get secret --namespace default cloudnative-jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode);echo
+2. Get the Jenkins URL to visit by running these commands in the same shell:
+NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch the status of by running 'kubectl get svc --namespace default -w cloudnative-jenkins'
+export SERVICE_IP=$(kubectl get svc --namespace default cloudnative-jenkins --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
+echo http://$SERVICE_IP:8080/login
+
+3. Login with the password from step 1 and the username: admin
+
+
+For more information on running Jenkins on Kubernetes, visit:
+https://cloud.google.com/solutions/jenkins-on-container-engine
+#################################################################################
+######   WARNING: Persistence is disabled!!! You will lose your data when   #####
+######            the Jenkins pod is terminated.                            #####
+#################################################################################
+
+

To get the url, run the below commands.

+
$ export SERVICE_IP=$(kubectl get svc --namespace default cloudnative-jenkins --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
+$ echo http://$SERVICE_IP:8080/login
+
+

Once executed, you will see something like below.

+
$ echo http://$SERVICE_IP:8080/login
+http://169.63.132.124:8080/login
+
+
    +
  • Now, let us login into the Jenkins.
  • +
+

OC Cluster

+

The user name will be admin and to get the password, run the below command.

+
$ printf $(kubectl get secret --namespace default cloudnative-jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode);echo
+
+

It returns you the password as follows.

+
$ printf $(kubectl get secret --namespace default cloudnative-jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode);echo
+password
+
+
    +
  • Once, successfully logged in you will see the Jenkins home page which is as follows.
  • +
+

OC Cluster

+

Get the Sample App

+
    +
  • +

    Fork the below repository.

    +

    https://github.com/ibm-cloud-architecture/cloudnative_sample_app

    +
  • +
  • +

    Clone the forked repository.

    +
  • +
+
$ git clone https://github.com/(user)/cloudnative_sample_app.git
+
+

Jenkinsfile

+

Before setting up the CI pipeline, let us first have a look at our Jenkinsfile and understand the stages here.

+

Open your Jenkinsfile or you can also access it https://github.com/ibm-cloud-architecture/cloudnative_sample_app/blob/master/Jenkinsfile[here].

+

In our Jenkins file, we have five stages.

+
    +
  • Local - Build
  • +
+

In this stage, we are building the application and packaging it using maven.

+
    +
  • Local - Test
  • +
+

In this stage, we are making sure all the unit tests run fine by running maven test.

+
    +
  • Local - Run
  • +
+

In this stage, we are running the application using the previous build and verifying the application by performing health and API checks.

+
    +
  • +

    Build and Push Image

    +
  • +
  • +

    We are logging in to the IBM Cloud and accessing the IBM Cloud Container Registry.

    +
  • +
  • We are also creating a namespace if not present.
  • +
  • We are building the image using ibmcloud cli tools.
  • +
  • Once the image is built, it is pushed into the container registry.
  • +
+

In this stage, we are building the docker image and pushing it to the registry.

+
    +
  • +

    Push to Deploy repo

    +
  • +
  • +

    Initially, we are cloning the deploy repository.

    +
  • +
  • Changing the image tag to the one we previously built and pushed.
  • +
  • Pushing these new changes to the deploy repository.
  • +
+

In this stage, we are pushing the new artifact tag to the deploy repository which will later be used by the Continuous Delivery system.

+

Jenkins Credentials

+

Let us now build all the credentials required by the pipeline.

+
    +
  • In the Jenkins home page, click on Credentials.
  • +
+

OC Cluster

+
    +
  • In the Credentials page, click on Jenkins.
  • +
+

OC Cluster

+
    +
  • Now, click on Global Credentials (UnRestricted).
  • +
+

OC Cluster

+
    +
  • Click on Add Credentials to create the ones required for this lab.
  • +
+

OC Cluster

+
    +
  • Now create a secret as follows.
  • +
+
+

Kind : Secret Text +Secret: Your container registry url, for eg., us.icr.io +ID: registry_url

+
+

OC Cluster

+

Once created, you will see something like below.

+

OC Cluster

+

Similarly create the rest of the credentials as well.

+
+

Kind : Secret Text +Secret: (Your registry namespace, for eg., catalyst_cloudnative) +ID: registry_namespace

+

Kind : Secret Text +Secret: (Your IBM cloud region, for eg., us-east) +ID: ibm_cloud_region

+

Kind : Secret Text +Secret: (Your IBM Cloud API key) +ID: ibm_cloud_api_key

+

Kind : Secret Text +Secret: (Your Github Username) +ID: git-account

+

Kind : Secret Text +Secret: (Your Github Token) +ID: github-token

+
+

Once all of them are created, you will have the list as follows.

+

OC Cluster

+

Jenkins Pipeline

+
    +
  • Create a new pipeline. Go to Jenkins -> Click on New Item.
  • +
+

OC Cluster

+
    +
  • Enter the name of your application, select Pipeline and then click OK.
  • +
+

OC Cluster

+
    +
  • In General, check This project is parameterized. Create a string parameter with name CLOUD and Default value kubernetes.
  • +
+

OC Cluster

+
    +
  • +

    Now go to the Pipeline tab and enter the details of the repository.

    +
  • +
  • +

    In the Definition, choose Pipeline script from SCM.

    +
  • +
  • Mention SCM as Git.
  • +
  • Enter the repository URL in Repository URL.
  • +
  • Specify master as the branch to build.
  • +
  • Save this information.
  • +
+

OC Cluster

+
    +
  • To initiate a build, click Build with Parameters.
  • +
+

OC Cluster

+
    +
  • Once the build is successful, you will see something like below.
  • +
+

OC Cluster

+

After this build is done, your deploy repository will be updated by the Jenkins.

+

OC Cluster

+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/devops/tekton/index.html b/labs/devops/tekton/index.html new file mode 100644 index 0000000..3adbaf4 --- /dev/null +++ b/labs/devops/tekton/index.html @@ -0,0 +1,3185 @@ + + + + + + + + + + + + + + + + + + + + + + + Tekton - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Tekton

+ +
+
+
+

Prerequisites

+

Make sure your environment is properly setup.

+

Follow the instructions here

+

SetUp

+

Tekton CLI Installation

+
    +
  • +

    Tekton CLI is a command line utility used to interact with the Tekton resources.

    +
  • +
  • +

    Follow the instructions on the tekton CLI github repository https://github.com/tektoncd/cli#installing-tkn

    +
  • +
  • +

    For MacOS for example you can use brew +

    brew tap tektoncd/tools
    +brew install tektoncd/tools/tektoncd-cli
    +

    +
  • +
  • Verify the Tekton cli +
    tkn version
    +
  • +
  • The command should show a result like: +
    $ tkn version
    +Client version: 0.10.0
    +
  • +
  • If you already have the tkn install you can upgrade running +
    brew upgrade tektoncd/tools/tektoncd-cli
    +
  • +
+

Tekton Pipelines Installation

+
    +
  • To deploy the Tekton pipelines: + oc apply --filename https://raw.githubusercontent.com/ibm-cloud-architecture/learning-cloudnative-101/master/static/yamls/tekton-lab/tekton-operator.yaml
  • +
  • Note: It will take a few mins for the Tekton pipeline components to be installed, you can watch the status using the command: +
    oc get pods -n openshift-operators -w
    +
    + You can use Ctrl+c to terminate the watch
  • +
  • A successful deployment of Tekton pipelines will show the following pods: +
    NAME                                         READY   STATUS    RESTARTS   AGE
    +openshift-pipelines-operator-9cdbbb854-x9tvs   1/1     Running   0          25s
    +
  • +
+

Create Target Namespace

+
    +
  • Set the environment variable NAMESPACE to tekton-demo, if you open a new terminal remember to set this environment again +
    export NAMESPACE=tekton-demo
    +
  • +
  • Create a the namespace using the variable NAMESPACE +
    oc new-project $NAMESPACE
    +
  • +
+

Tasks

+

Task Creation

+
    +
  • Create the below yaml files.
  • +
  • The following snippet shows what a Tekton Task YAML looks like:
  • +
  • +

    Create the file task-test.yaml +

    apiVersion: tekton.dev/v1beta1
    +kind: Task
    +metadata:
    +name: java-test
    +spec:
    +params:
    +    - name: url
    +    default: https://github.com/ibm-cloud-architecture/cloudnative_sample_app
    +    - name: revision
    +    default: master
    +steps:
    +    - name: git-clone
    +    image: alpine/git
    +    script: |
    +        git clone -b $(params.revision) --depth 1 $(params.url) /source
    +    volumeMounts:
    +        - name: source
    +        mountPath: /source
    +    - name: test
    +    image: maven:3.3-jdk-8
    +    workingdir: /source
    +    script: |
    +        mvn test
    +        echo "tests passed with rc=$?"
    +    volumeMounts:
    +        - name: m2-repository
    +        mountPath: /root/.m2
    +        - name: source
    +        mountPath: /source
    +volumes:
    +    - name: m2-repository
    +    emptyDir: {}
    +    - name: source
    +    emptyDir: {}
    +

    +
  • +
  • +

    Each Task has the following:

    +
  • +
  • name - the unique name using which the task can be referred
      +
    • name - the name of the parameter
    • +
    • description - the description of the parameter
    • +
    • default - the default value of parameter
    • +
    +
  • +
  • +

    Note: The TaskRun or PipelineRun could override the parameter values, if no parameter value is passed then the default value will be used.

    +
  • +
  • +

    steps - One or more sub-tasks that will be executed in the defined order. The step has all the attributes like a Pod spec

    +
  • +
  • volumes - the task can also mount external volumes using the volumes attribute.
  • +
  • The parameters that were part of the spec inputs params can be used in the steps using the notation $(<variable-name>).
  • +
+

Task Deploy

+
    +
  • +

    The application test task could be created using the command: +

    oc apply -f task-test.yaml -n $NAMESPACE
    +

    +
  • +
  • +

    We will use the Tekton cli to inspect the created resources +

    tkn task ls -n $NAMESPACE
    +

    +
  • +
  • +

    The above command should list one Task as shown below: +

    NAME        AGE
    +java-test   22 seconds ago
    +

    +
  • +
+

TaskRun

+
    +
  • The TaskRun is used to run a specific task independently. In the following section we will run the build-app task created in the previous step
  • +
+

TaskRun Creation

+
    +
  • The following snippet shows what a Tekton TaskRun YAML looks like:
  • +
  • Create the file taskrun-test.yaml +
    apiVersion: tekton.dev/v1beta1
    +kind: TaskRun
    +metadata:
    +generateName: test-task-run-
    +spec:
    +taskRef:
    +    name: java-test
    +params:
    +    - name: url
    +    value: https://github.com/ibm-cloud-architecture/cloudnative_sample_app
    +
  • +
  • generateName - since the TaskRun can be run many times, in order to have a unique name across the TaskRuns (helpful when checking the TaskRun history) we use this generateName instead of name. When Kubernetes sees generateName it will generate a unique set of characters and suffix the same to build-app-, similar to how pod names are generated
  • +
  • taskRef - this is used to refer to the Task by its name that will be run as part of this TaskRun. In this example we use build-app Task.
  • +
  • As described in the earlier section that the Task inputs and outputs could be overridden via TaskRun.
  • +
  • params - these are the parameter values that are passed to the task
  • +
  • The application test task(java-maven-test) could be run using the command: +
    kubectl create -f taskrun-test.yaml -n $NAMESPACE 
    +
  • +
  • Note - As tasks will use generated name, never use oc apply -f taskrun-test.yaml
  • +
  • +

    We will use the Tekton cli to inspect the created resources: +

    tkn tr ls -n $NAMESPACE
    +
    + The above command should list one TaskRun as shown below: +
    NAME                       STARTED        DURATION   STATUS
    +test-task-run-q6s8c        1 minute ago   ---        Running(Pending)
    +
    + Note - It will take few seconds for the TaskRun to show status as Running as it needs to download the container images.

    +
  • +
  • +

    To check the logs of the Task Run using the tkn: +

    tkn tr logs -f --last -n $NAMESPACE
    +
    +Note - Each task step will be run within a container of its own. +The -f or -a allows to tail the logs from all the containers of the task. For more options run tkn tr logs --help

    +
  • +
  • If you see the TaskRun status as Failed or Error use the following command to check the reason for error: +
    tkn tr describe --last -n $NAMESPACE
    +
  • +
  • If it is successful, you will see something like below. +
    tkn tr ls -n $NAMESPACE
    +
    + The above command should list one TaskRun as shown below: +
    NAME                  STARTED          DURATION     STATUS
    +test-task-run-q6s8c   47 seconds ago   34 seconds   Succeeded
    +
  • +
+

Creating additional tasks and deploying them

+
    +
  • Create a Task to build a container image and push to the registry
  • +
  • This task will be later used by the pipeline.
  • +
  • Download the task file task-buildah.yaml to build the image, push the image to the registry:
  • +
  • Create the buildah Task using the file and the command: +
    oc apply -f task-buildah.yaml -n $NAMESPACE
    +
  • +
  • Use the Tekton cli to inspect the created resources +
    tkn task ls -n $NAMESPACE
    +
  • +
  • +

    The above command should list one Task as shown below: +

    NAME              AGE
    +buildah            4 seconds ago
    +java-test         46 minutes ago
    +

    +
  • +
  • +

    Create an environment variable for the location where the built image will be pushed. Replace NAMESPACE with your Docker Hub username, or IBM CR namespace +

    export REGISTRY_SERVER=image-registry.openshift-image-registry.svc:5000
    +export IMAGE_URL=${REGISTRY_SERVER}/${NAMESPACE}/cloudnative_sample_app
    +echo IMAGE_URL=${IMAGE_URL}
    +

    +
  • +
  • +

    Let's create a TaskRun for the buildah Task using the tkn CLI, passing the inputs, outputs and service account. +

    tkn task start buildah --showlog \
    +-p image=${IMAGE_URL} \
    +-p url=https://github.com/ibm-cloud-architecture/cloudnative_sample_app \
    +-s pipeline \
    +-n $NAMESPACE
    +
    + The task will start and logs will start printing automatically +
    Taskrun started: buildah-run-vvrg2
    +Waiting for logs to be available...
    +

    +
  • +
  • +

    Verify the status of the Task Run +

    tkn tr ls -n $NAMESPACE
    +
    + Output should look like this +
    NAME                  STARTED          DURATION     STATUS
    +buildah-run-zbsrv      2 minutes ago    1 minute     Succeeded
    +

    +
  • +
  • To clean up all Pods associated with all Task Runs, delete all the task runs resources +
    oc delete taskrun --all -n $NAMESPACE
    +
  • +
  • (Optional) Instead of starting the Task via tkn task start you could also use yaml TaskRun, create a file taskrun.yaml +
    apiVersion: tekton.dev/v1beta1
    +kind: TaskRun
    +metadata:
    +generateName: buildah-task-run-
    +spec:
    +serviceAccountName: pipeline
    +taskRef:
    +    name: buildah
    +params:
    +    - name: url
    +    value: https://github.com/ibm-cloud-architecture/cloudnative_sample_app
    +    - name: image
    +    value: image-registry.openshift-image-registry.svc:5000/tekton-demo/cloudnative_sample_app
    +
    + Then create the TaskRun with +
    oc create -f taskrun-buildah.yaml -n $NAMESPACE
    +
    + Follow the logs with: +
    tkn tr logs -f -n $NAMESPACE
    +
  • +
+

Pipelines

+

Pipeline Creation

+
    +
  • +

    Pipelines allow you to start multiple Tasks, in parallel or in a certain order

    +
  • +
  • +

    Create the file pipeline.yaml, the Pipeline contains two Tasks +

    apiVersion: tekton.dev/v1beta1
    +kind: Pipeline
    +metadata:
    +name: test-build
    +spec:
    +params:
    +    - name: repo-url
    +    default: https://github.com/ibm-cloud-architecture/cloudnative_sample_app
    +    - name: revision
    +    default: master
    +    - name: image-server
    +    default: image-registry.openshift-image-registry.svc:5000
    +    - name: image-namespace
    +    default: tekton-demo
    +    - name: image-repository
    +    default: cloudnative_sample_app
    +tasks:
    +    - name: test
    +    taskRef:
    +        name: java-test
    +    params:
    +        - name: url
    +        value: $(params.repo-url)
    +        - name: revision
    +        value: $(params.revision)
    +    - name: build
    +    runAfter: [test]
    +    taskRef:
    +        name: buildah
    +    params:
    +        - name: image
    +        value: $(params.image-server)/$(params.image-namespace)/$(params.image-repository)
    +        - name: url
    +        value: $(params.repo-url)
    +        - name: revision
    +        value: $(params.revision)
    +

    +
  • +
  • +

    Pipeline defines a list of Tasks to execute in order, while also indicating if any outputs should be used as inputs of a following Task by using the from field and also indicating the order of executing (using the runAfter and from fields). The same variable substitution you used in Tasks is also available in a Pipeline.

    +
  • +
  • Create the Pipeline using the command: +
    oc apply -f pipeline.yaml -n $NAMESPACE
    +
  • +
  • Use the Tekton cli to inspect the created resources +
    tkn pipeline ls -n $NAMESPACE
    +
    +The above command should list one Pipeline as shown below: +
    NAME              AGE              LAST RUN   STARTED   DURATION   STATUS
    +test-build-push   31 seconds ago   ---        ---       ---        ---
    +
  • +
+

PipelineRun

+

PipelineRun Creation

+
    +
  • To execute the Tasks in the Pipeline, you must create a PipelineRun. Creation of a PipelineRun will trigger the creation of TaskRuns for each Task in your pipeline.
  • +
  • Create the file pipelinerun.yaml +
    apiVersion: tekton.dev/v1alpha1
    +kind: PipelineRun
    +metadata:
    +generateName: test-build-run-
    +spec:
    +serviceAccountName: pipeline
    +pipelineRef:
    +    name: test-build
    +params:
    +    - name: image-server
    +    value: image-registry.openshift-image-registry.svc:5000
    +    - name: image-namespace
    +    value: tekton-demo
    +
    + serviceAccount - it is always recommended to have a service account associated with PipelineRun, which can then be used to define fine grained roles.
  • +
  • Create the PipelineRun using the command: +
    oc create -f pipelinerun.yaml -n $NAMESPACE
    +
  • +
  • +

    We will use the Tekton cli to inspect the created resources +

    tkn pipelinerun ls -n $NAMESPACE
    +

    +
  • +
  • +

    The above command should list one PipelineRun as shown below: +

    NAME                        STARTED         DURATION   STATUS
    +test-build-push-run-c7zgv   8 seconds ago   ---        Running
    +

    +
  • +
  • +

    Wait a few minutes for your pipeline to complete all the tasks. If it is successful, you will see something like below. +

    tkn pipeline ls -n $NAMESPACE
    +
    +
    NAME              AGE              LAST RUN                    STARTED         DURATION    STATUS
    +test-build-push   33 minutes ago   test-build-push-run-c7zgv   2 minutes ago   2 minutes   Succeeded
    +

    +
  • +
  • +

    Run again the pipeline ls command +

    tkn pipelinerun ls -n $NAMESPACE
    +
    +
    NAME                        STARTED         DURATION    STATUS
    +test-build-push-run-c7zgv   2 minutes ago   2 minutes   Succeeded
    +
    + If it is successful, go to your container registry account and verify if you have the cloudnative_sample_app image pushed.

    +
  • +
  • +

    (Optional) Run the pipeline again using the tkn CLI +

    tkn pipeline start test-build --showlog \
    +-s pipeline \
    +-n $NAMESPACE
    +

    +
  • +
  • (Optional) Re-run the pipeline using last pipelinerun values +
    tkn pipeline start test-build --last -n $NAMESPACE
    +
  • +
+

Deploy Application

+
    +
  • Create a deployment +
    oc create deployment cloudnative --image=${IMAGE_URL} -n $NAMESPACE
    +
  • +
  • Verify if the pods are running: +
    oc get pods -l app=cloudnative -n $NAMESPACE
    +
  • +
  • Expose the deployment as a service +
    oc expose deployment cloudnative --port=9080 -n $NAMESPACE
    +
  • +
  • Expose the service as a route +
    oc expose service cloudnative -n $NAMESPACE
    +
  • +
  • Now compose the URL of the App using the Route host +
    export APP_URL="$(oc get route cloudnative --template 'http://{{.spec.host}}')/greeting?name=Carlos"
    +echo APP_URL=$APP_URL
    +
    +
    http://cloudnative-tekton-demo.apps-crc.testing/greeting?name=Carlos
    +
  • +
  • Now access the app from terminal or browser +
    curl $APP_URL
    +
    + Output should be +
    {"id":4,"content":"Welcome to Cloudnative bootcamp !!! Hello, Carlos :)"}
    +
    +
    open $APP_URL
    +
  • +
+
+
+

Prerequisites

+

Make sure your environment is properly setup.

+

Follow the instructions here

+

SetUp

+

Tekton CLI Installation

+
    +
  • +

    The Tekton CLI is a command line utility used to interact with Tekton resources.

    +
  • +
  • +

    Follow the instructions on the tekton CLI github repository https://github.com/tektoncd/cli#installing-tkn

    +
  • +
  • +

    For MacOS for example you can use brew +

    brew install tektoncd-cli
    +

    +
  • +
  • Verify the Tekton cli +
    tkn version
    +
  • +
  • The command should show a result like: +
    $ tkn version
    +Client version: 0.10.0
    +
  • +
  • If you already have tkn installed you can upgrade it by running +
    brew upgrade tektoncd/tools/tektoncd-cli
    +
  • +
+

Tekton Pipelines Installation

+
    +
  • To deploy the Tekton pipelines: +
    kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/previous/v0.13.2/release.yaml
    +
  • +
  • Note: It will take a few minutes for the Tekton pipeline components to be installed; you can watch the status using the command: +
    kubectl get pods -n tekton-pipelines -w
    +
    + You can use Ctrl+c to terminate the watch
  • +
  • A successful deployment of Tekton pipelines will show the following pods: +
    NAME                                         READY   STATUS    RESTARTS   AGE
    +tekton-pipelines-controller-9b8cccff-j6hvr   1/1     Running   0          2m33s
    +tekton-pipelines-webhook-6fc9d4d9b6-kpkp7    1/1     Running   0          2m33s
    +
  • +
+

Tekton Dashboard Installation (Optional)

+
    +
  • To deploy the Tekton dashboard: +
    kubectl apply --filename https://github.com/tektoncd/dashboard/releases/download/v0.7.0/tekton-dashboard-release.yaml
    +
  • +
  • Note: It will take a few minutes for the Tekton dashboard components to be installed; you can watch the status using the command: +
    kubectl get pods -n tekton-pipelines -w
    +
    + You can use Ctrl+c to terminate the watch
  • +
  • A successful deployment of Tekton pipelines will show the following pods: +
    NAME                                           READY   STATUS    RESTARTS   AGE
    +tekton-dashboard-59c7fbf49f-79f7q              1/1     Running   0          50s
    +tekton-pipelines-controller-6b7f7cf7d8-r65ps   1/1     Running   0          15m
    +tekton-pipelines-webhook-7bbd8fcc45-sfgxs      1/1     Running   0          15m
    +
  • +
  • Access the dashboard as follows: +
    kubectl --namespace tekton-pipelines port-forward svc/tekton-dashboard 9097:9097
    +
    + You can access the web UI at http://localhost:9097 .
  • +
+

Create Target Namespace

+
    +
  • Set the environment variable NAMESPACE to tekton-demo, if you open a new terminal remember to set this environment again +
    export NAMESPACE=tekton-demo
    +
  • +
  • Create the namespace using the variable NAMESPACE +
    kubectl create namespace $NAMESPACE
    +
  • +
+

Tasks

+

Task Creation

+
    +
  • Create the below yaml files.
  • +
  • The following snippet shows what a Tekton Task YAML looks like:
  • +
  • +

    Create the file task-test.yaml +

    apiVersion: tekton.dev/v1beta1
    +kind: Task
    +metadata:
    +name: java-test
    +spec:
    +params:
    +    - name: url
    +    - name: revision
    +    default: master
    +steps:
    +    - name: git-clone
    +    image: alpine/git
    +    script: |
    +        git clone -b $(params.revision) --depth 1 $(params.url) /source
    +    volumeMounts:
    +        - name: source
    +        mountPath: /source
    +    - name: test
    +    image: maven:3.3-jdk-8
    +    workingdir: /source
    +    script: |
    +        mvn test
    +        echo "tests passed with rc=$?"
    +    volumeMounts:
    +        - name: m2-repository
    +        mountPath: /root/.m2
    +        - name: source
    +        mountPath: /source
    +volumes:
    +    - name: m2-repository
    +    emptyDir: {}
    +    - name: source
    +    emptyDir: {}
    +

    +
  • +
  • +

    Each Task has the following:

    +
  • +
  • name - the unique name using which the task can be referred
      +
    • name - the name of the parameter
    • +
    • description - the description of the parameter
    • +
    • default - the default value of parameter
    • +
    +
  • +
  • +

    Note: The TaskRun or PipelineRun could override the parameter values, if no parameter value is passed then the default value will be used.

    +
  • +
  • +

    steps - One or more sub-tasks that will be executed in the defined order. The step has all the attributes like a Pod spec

    +
  • +
  • volumes - the task can also mount external volumes using the volumes attribute.
  • +
  • The parameters that were part of the spec inputs params can be used in the steps using the notation $(<variable-name>).
  • +
+

Task Deploy

+
    +
  • +

    The application test task could be created using the command: +

    kubectl apply -f task-test.yaml -n $NAMESPACE
    +

    +
  • +
  • +

    We will use the Tekton cli to inspect the created resources +

    tkn task ls -n $NAMESPACE
    +

    +
  • +
  • +

    The above command should list one Task as shown below: +

    NAME        AGE
    +java-test   22 seconds ago
    +

    +
  • +
+

TaskRun

+
    +
  • The TaskRun is used to run a specific task independently. In the following section we will run the java-test task created in the previous step
  • +
+

TaskRun Creation

+
    +
  • The following snippet shows what a Tekton TaskRun YAML looks like:
  • +
  • Create the file taskrun-test.yaml +
    apiVersion: tekton.dev/v1beta1
    +kind: TaskRun
    +metadata:
    +generateName: test-task-run-
    +spec:
    +taskRef:
    +    name: java-test
    +params:
    +    - name: url
    +    value: https://github.com/ibm-cloud-architecture/cloudnative_sample_app
    +
  • +
  • generateName - since the TaskRun can be run many times, in order to have a unique name across the TaskRuns (helpful when checking the TaskRun history) we use this generateName instead of name. When Kubernetes sees generateName it will generate a unique set of characters and suffix them to test-task-run-, similar to how pod names are generated
  • +
  • taskRef - this is used to refer to the Task by its name that will be run as part of this TaskRun. In this example we use the java-test Task.
  • +
  • As described in the earlier section that the Task inputs and outputs could be overridden via TaskRun.
  • +
  • params - these are the parameter values that are passed to the task
  • +
  • The application test task (java-test) could be run using the command: +
    kubectl create -n $NAMESPACE -f taskrun-test.yaml
    +
  • +
  • Note - As tasks will use generated name, never use kubectl apply -f taskrun-test.yaml
  • +
  • +

    We will use the Tekton cli to inspect the created resources: +

    tkn tr ls -n $NAMESPACE
    +
    + The above command should list one TaskRun as shown below: +
    NAME                       STARTED        DURATION   STATUS
    +test-task-run-q6s8c        1 minute ago   ---        Running(Pending)
    +
    + Note - It will take a few seconds for the TaskRun to show its status as Running, as it needs to download the container images.

    +
  • +
  • +

    To check the logs of the Task Run using the tkn: +

    tkn tr logs -f -a -n $NAMESPACE
    +
    +Note - Each task step will be run within a container of its own. +The -f or -a flags allow you to tail the logs from all the containers of the task. For more options run tkn tr logs --help

    +
  • +
  • If you see the TaskRun status as Failed or Error use the following command to check the reason for error: +
    tkn tr describe --last -n $NAMESPACE
    +
  • +
  • If it is successful, you will see something like below. +
    tkn tr ls -n $NAMESPACE
    +
    + The above command should list one TaskRun as shown below: +
    NAME                  STARTED          DURATION     STATUS
    +test-task-run-q6s8c   47 seconds ago   34 seconds   Succeeded
    +
  • +
+

Creating additional tasks and deploying them

+
    +
  • Create a Task to build a container image and push to the registry
  • +
  • This task will be later used by the pipeline.
  • +
  • Download the task file task-buildah.yaml to build the image, push the image to the registry:
  • +
  • Create task buildah
  • +
  • Create the buildah Task using the file and the command: +
    kubectl apply -f task-buildah.yaml -n $NAMESPACE
    +
  • +
  • Use the Tekton cli to inspect the created resources +
    tkn task ls -n $NAMESPACE
    +
  • +
  • +

    The above command should list one Task as shown below: +

    NAME              AGE
    +buildah            4 seconds ago
    +java-test         46 minutes ago
    +

    +
  • +
  • +

    To access the container registry, create the required secret as follows.

    +
  • +
  • If using IBM Container registry use iamapikey for REGISTRY_USERNAME and get a API Key for REGISTRY_PASSWORD, use the domain name for the region IBM CR service like us.icr.io
  • +
  • +

    Create the environment variables to be used; replace them with real values and include the single quotes: +

    export REGISTRY_USERNAME='<REGISTRY_USERNAME>'
    +
    +
    export REGISTRY_PASSWORD='<REGISTRY_PASSWORD>'
    +
    +
    export REGISTRY_SERVER='docker.io'
    +

    +
  • +
  • +

    Run the following command to create a secret regcred in the namespace NAMESPACE +

    kubectl create secret docker-registry regcred \
    +--docker-server=${REGISTRY_SERVER} \
    +--docker-username=${REGISTRY_USERNAME} \
    +--docker-password=${REGISTRY_PASSWORD} \
    +-n ${NAMESPACE}
    +

    +

    +Before creating, replace the values as mentioned above. +Note: If your docker password contains special characters in it, please enclose the password in double quotes or place an escape character before each special character. +

    +
      +
    • (Optional) Only if you have problems with the credentials you can recreate the secret, but you have to delete it first +
      kubectl delete secret regcred -n $NAMESPACE
      +
    • +
    +
  • +
  • +

    Before we run the Task using TaskRun let us create the Kubernetes service account and attach the needed permissions to the service account, the following Kubernetes resource defines a service account called pipeline in namespace $NAMESPACE who will have administrative role within the $NAMESPACE namespace.

    +
  • +
  • Create the file sa.yaml +
    apiVersion: v1
    +kind: ServiceAccount
    +metadata:
    +name: pipeline
    +secrets:
    +- name: regcred
    +
  • +
  • +

    Create sa role as follows: +

    kubectl create -n $NAMESPACE -f sa.yaml
    +

    +
  • +
  • +

    Create an environment variable for the location where the built image will be pushed. Replace the placeholder with your Docker Hub username, or IBM CR namespace +

    export REGISTRY_NAMESPACE='<REGISTRY_NAMESPACE>'
    +export IMAGE_URL=${REGISTRY_SERVER}/${REGISTRY_NAMESPACE}/cloudnative_sample_app
    +

    +
  • +
  • +

    Let's create a TaskRun for the buildah Task using the tkn CLI, passing the inputs, outputs and service account. +

    tkn task start buildah --showlog \
    +-p url=https://github.com/ibm-cloud-architecture/cloudnative_sample_app \
    +-p image=${IMAGE_URL} \
    +-s pipeline \
    +-n $NAMESPACE
    +

    +

    The task will start and logs will start printing automatically +

    Taskrun started: buildah-run-vvrg2
    +Waiting for logs to be available...
    +

    +
  • +
  • +

    Verify the status of the Task Run +

    tkn tr ls -n $NAMESPACE
    +
    + Output should look like this +
    NAME                  STARTED          DURATION     STATUS
    +buildah-run-zbsrv      2 minutes ago    1 minute     Succeeded
    +

    +
  • +
  • To clean up all Pods associated with all Task Runs, delete all the task runs resources +
    kubectl delete taskrun --all -n $NAMESPACE
    +
  • +
  • (Optional) Instead of starting the Task via tkn task start you could also use yaml TaskRun, create a file taskrun-buildah.yaml Make sure update value for parameter image with your registry info. +
    apiVersion: tekton.dev/v1beta1
    +kind: TaskRun
    +metadata:
    +generateName: buildah-task-run-
    +spec:
    +serviceAccountName: pipeline
    +taskRef:
    +    name: buildah
    +params:
    +    - name: url
    +    value: https://github.com/ibm-cloud-architecture/cloudnative_sample_app
    +    - name: image
    +    value: docker.io/csantanapr/cloudnative_sample_app
    +
    + Then create the TaskRun with generateName +
    kubectl create -f taskrun-buildah.yaml -n $NAMESPACE
    +
    + Follow the logs with: +
    tkn tr logs --last -f -n $NAMESPACE
    +
  • +
+

Pipelines

+

Pipeline Creation

+
    +
  • +

    Pipelines allow you to start multiple Tasks, in parallel or in a certain order

    +
  • +
  • +

    Create the file pipeline.yaml, the Pipeline contains two Tasks +

    apiVersion: tekton.dev/v1beta1
    +kind: Pipeline
    +metadata:
    +name: test-build
    +spec:
    +params:
    +    - name: repo-url
    +    default: https://github.com/ibm-cloud-architecture/cloudnative_sample_app
    +    - name: revision
    +    default: master
    +    - name: image-server
    +    - name: image-namespace
    +    - name: image-repository
    +    default: cloudnative_sample_app
    +tasks:
    +    - name: test
    +    taskRef:
    +        name: java-test
    +    params:
    +        - name: url
    +        value: $(params.repo-url)
    +        - name: revision
    +        value: $(params.revision)
    +    - name: build
    +    runAfter: [test]
    +    taskRef:
    +        name: buildah
    +    params:
    +        - name: image
    +        value: $(params.image-server)/$(params.image-namespace)/$(params.image-repository)
    +        - name: url
    +        value: $(params.repo-url)
    +        - name: revision
    +        value: $(params.revision)
    +

    +
  • +
  • +

    Pipeline defines a list of Tasks to execute in order, while also indicating if any outputs should be used as inputs of a following Task by using the from field and also indicating the order of executing (using the runAfter and from fields). The same variable substitution you used in Tasks is also available in a Pipeline.

    +
  • +
  • Create the Pipeline using the command: +
    kubectl apply -f pipeline.yaml -n $NAMESPACE
    +
  • +
  • Use the Tekton cli to inspect the created resources +
    tkn pipeline ls -n $NAMESPACE
    +
    +The above command should list one Pipeline as shown below: +
    NAME              AGE              LAST RUN   STARTED   DURATION   STATUS
    +test-build-push   31 seconds ago   ---        ---       ---        ---
    +
  • +
+

PipelineRun

+

PipelineRun Creation

+
    +
  • To execute the Tasks in the Pipeline, you must create a PipelineRun. Creation of a PipelineRun will trigger the creation of TaskRuns for each Task in your pipeline.
  • +
  • Create the file pipelinerun.yaml replace the values for image-server and image-namespace with your own. +
    apiVersion: tekton.dev/v1beta1
    +kind: PipelineRun
    +metadata:
    +generateName: test-build-run-
    +spec:
    +serviceAccountName: pipeline
    +pipelineRef:
    +    name: test-build
    +params:
    +    - name: image-server
    +    value: us.icr.io
    +    - name: image-namespace
    +    value: student01-registry
    +
    + serviceAccount - it is always recommended to have a service account associated with PipelineRun, which can then be used to define fine grained roles. + Replace the values for image-server and image-namespace
  • +
  • Create the PipelineRun using the command: +
    kubectl create -f pipelinerun.yaml -n $NAMESPACE
    +
  • +
  • +

    We will use the Tekton cli to inspect the created resources +

    tkn pipelinerun ls -n $NAMESPACE
    +

    +
  • +
  • +

    The above command should list one PipelineRun as shown below: +

    NAME                        STARTED         DURATION   STATUS
    +test-build-push-run-c7zgv   8 seconds ago   ---        Running
    +

    +
  • +
  • +

    Get the logs of the pipeline using the following command +

    tkn pipelinerun logs --last -f
    +

    +
  • +
  • Wait a few minutes for your pipeline to complete all the tasks. If it is successful, you will see something like below. +
    tkn pipeline ls -n $NAMESPACE
    +
    +
    NAME              AGE              LAST RUN                    STARTED         DURATION    STATUS
    +test-build-push   33 minutes ago   test-build-push-run-c7zgv   2 minutes ago   2 minutes   Succeeded
    +
  • +
  • +

    Run again the pipeline ls command +

    tkn pipelinerun ls -n $NAMESPACE
    +
    +
    NAME                        STARTED         DURATION    STATUS
    +test-build-push-run-c7zgv   2 minutes ago   2 minutes   Succeeded
    +
    + If it is successful, go to your container registry account and verify if you have the cloudnative_sample_app image pushed.

    +
  • +
  • +

    (Optional) Run the pipeline again using the tkn CLI +

    tkn pipeline start test-build --last -n $NAMESPACE
    +

    +
  • +
  • (Optional) Re-run the pipeline using last pipelinerun values +
    tkn pipeline start test-build --last -n $NAMESPACE
    +
  • +
+

Deploy Application

+
    +
  • Add the imagePullSecret to the default Service Account +
    kubectl patch sa default -p '"imagePullSecrets": [{"name": "regcred" }]' -n $NAMESPACE
    +
  • +
  • Create a deployment +
    kubectl create deployment cloudnative --image=${IMAGE_URL} -n $NAMESPACE
    +
  • +
  • Verify if the pods are running: +
    kubectl get pods -l app=cloudnative -n $NAMESPACE
    +
  • +
  • Expose the deployment +
    kubectl expose deployment cloudnative --type=NodePort --port=9080 -n $NAMESPACE
    +
  • +
  • Now compose the URL of the App using the IP and NodePort +
    export APP_EXTERNAL_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="ExternalIP")].address}')
    +export APP_NODEPORT=$(kubectl get svc cloudnative -n $NAMESPACE -o jsonpath='{.spec.ports[0].nodePort}')
    +export APP_URL="http://${APP_EXTERNAL_IP}:${APP_NODEPORT}/greeting?name=Carlos"
    +echo APP_URL=$APP_URL
    +
    +
    http://192.168.64.30:30632//greeting?name=Carlos
    +
  • +
  • Now access the app from terminal or browser +
    curl $APP_URL
    +
    + Output should be +
    {"id":4,"content":"Welcome to Cloudnative bootcamp !!! Hello, Carlos :)"}
    +
    +
    open $APP_URL
    +
  • +
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/index.html b/labs/index.html new file mode 100644 index 0000000..45fed07 --- /dev/null +++ b/labs/index.html @@ -0,0 +1,1791 @@ + + + + + + + + + + + + + + + + + + + + + + + Activities - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Activities

+

Containers

+ + + + + + + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
Try It Yourself
IBM Container RegistryBuild and Deploy Run using IBM Container RegistryIBM Container Registry
Docker LabRunning a Sample Application on DockerDocker Lab
+

Kubernetes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
Try It Yourself
Pod CreationChallenge yourself to create a Pod YAML file to meet certain parameters.Pod Creation
Pod ConfigurationConfigure a pod to meet compute resource requirements.Pod Configuration
Multiple ContainersBuild a container using legacy container image.Multiple Containers
ProbesCreate some Health & Startup Probes to find what's causing an issue.Probes
DebuggingFind which service is breaking in your cluster and find out why.Debugging
Rolling Updates LabCreate a Rolling Update for your application.Rolling Updates
Cron Jobs LabUsing Tekton to test new versions of applications.Crons Jobs
Creating ServicesCreate two services with certain requirements.Setting up Services
Setting up Persistent VolumesCreate a Persistent Volume that's accessible from a SQL Pod.Setting up Persistent Volumes
IKS Ingress ControllerConfigure Ingress on Free IKS ClusterSetting IKS Ingress
Solutions
Lab SolutionsSolutions for the Kubernetes LabsSolutions
+

Continuous Integration

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
Walkthroughs
Deploying Applications From SourceUsing OpenShift 4S2I
Try It Yourself
Tekton LabUsing Tekton to test new versions of applications.Tekton
IBM Cloud DevOpsUsing IBM Cloud ToolChain with TektonTekton on IBM Cloud
Jenkins LabUsing Jenkins to test new versions of applications.Jenkins
+

Continuous Deployment

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
Walkthroughs
GitOpsIntroduction to GitOps with OpenShiftLearn OpenShift
GitOps Multi-clusterMulti-cluster GitOps with OpenShiftLearn OpenShift
Try It Yourself
ArgoCD LabLearn how to setup ArgoCD and Deploy ApplicationArgoCD
+

Projects

+ + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
Try It Yourself
Cloud Native ChallengeDeploy your own app using what we have learnedCN Challenge
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/ingress-iks/index.html b/labs/kubernetes/ingress-iks/index.html new file mode 100644 index 0000000..2a8e7e6 --- /dev/null +++ b/labs/kubernetes/ingress-iks/index.html @@ -0,0 +1,1715 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab Ingress Controller IBM Free Kubernetes cluster - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 11 - Ingress IKS

+ +

The IBM Kubernetes service free clusters consist of a single worker node with 2 CPU and 4 GB of memory for experimenting with Kubernetes. Unlike the fee-based service, these clusters do not include capabilities for application load balancing using ingress out-of-the-box.

+

Prerequisites

+
    +
  • Free IBM Kubernetes Cluster (IKS) - upgrade your account from Lite plan to create one. In the example commands, we'll assume that this cluster is named mycluster
  • +
  • kubectl - match your cluster API version
  • +
  • Log in to IBM Cloud and configure kubectl using the ibmcloud ks cluster config --cluster mycluster command
  • +
+

Components

+

On the IKS cluster, you will install helm charts for an NGINX ingress controller from NGINX. This lab already provides the templated yaml files, so there is no need to use the helm CLI.

+

Set up the ingress controller

+

Only do this on a free IKS instance These steps assume facts that only apply to free IKS instances:

+
    +
  • a single worker where the cluster administrator can create pods that bind to host ports
  • +
  • no pre-existing ingress controller or application load balancer
  • +
+

Using the following steps with a paid instance can cause issues. See the IBM Cloud containers documentation for information on exposing applications with the ingress/alb services for paid clusters. You have been warned

+
    +
  1. +

    Install the NGINX ingress controller with helm using a daemonset and no service resource (which will result in a single pod that binds to ports 80 and 443 on the worker node and will skip creation of a ClusterIP, LoadBalancer, or NodePort for the daemonset). +

    kubectl apply -f https://cloudnative101.dev/yamls/ingress-controller/iks-ingress-v1.7.1.yaml
    +

    +
  2. +
  3. +

    You can use free domain .nip.io to get a domain name using one of the IP Address of your worker nodes. Run this command to set your DOMAIN +

    export DOMAIN=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="ExternalIP")].address}').nip.io
    +echo $DOMAIN
    +

    +
  4. +
  5. +

    You can test the ingress controller using the $DOMAIN:

    +

    curl -I http://$DOMAIN
    +
    +
    HTTP/1.1 404 Not Found
    +Server: nginx/1.17.10
    +...
    +

    +

    A 404 is expected at this point because unlike the kubernetes nginx ingress, the NGINX version of the ingress controller does not create a default backend deployment.

    +
  6. +
  7. +

    To use the ingress controller, deploy a sample application and expose it as a service. +

    kubectl create deployment web --image=bitnami/nginx
    +kubectl expose deployment web --name=web --port 8080
    +

    +
  8. +
  9. +

    Now create an Ingress resource +

    cat <<EOF | kubectl apply -f -
    +apiVersion: networking.k8s.io/v1beta1
    +kind: Ingress
    +metadata:
    +  name: web
    +  labels:
    +    app: web
    +spec:
    +  rules:
    +    - host: web.$DOMAIN
    +      http:
    +        paths:
    +          - path: /
    +            backend:
    +              serviceName: web
    +              servicePort: 8080
    +EOF
    +echo "Access your web app at http://web.$DOMAIN"
    +

    +
  10. +
  11. +

    List the created ingress +

    kubectl get ingress web
    +

    +
  12. +
  13. +

    Access your web application +

    curl http://web.$DOMAIN
    +
    + The output prints the html +
    <p><em>Thank you for using nginx.</em></p>
    +

    +
  14. +
  15. +

    Delete all the resources created +

    kubectl delete deployment,svc,ingress -l app=web
    +

    +
  16. +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab-solutions/index.html b/labs/kubernetes/lab-solutions/index.html new file mode 100644 index 0000000..8bb722c --- /dev/null +++ b/labs/kubernetes/lab-solutions/index.html @@ -0,0 +1,1575 @@ + + + + + + + + + + + + + + + + + + + + + + + Solutions - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab1/index.html b/labs/kubernetes/lab1/index.html new file mode 100644 index 0000000..c1e7b70 --- /dev/null +++ b/labs/kubernetes/lab1/index.html @@ -0,0 +1,1628 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab 1 - Pod Creation - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 1 - Pod Creation

+ +

Problem

+
    +
  • Write a pod definition named yoda-service-pod.yml. Then create a pod in the cluster using this definition to make sure it works.
  • +
+

The specifications of this pod are as follows:

+
    +
  • Use the bitnami/nginx container image.
  • +
  • The container needs a containerPort of 80.
  • +
  • Set the command to run as nginx
  • +
  • Pass in the -g daemon off; -q args to run nginx in quiet mode.
  • +
  • Create the pod in the web namespace.
  • +
+

Verification

+

When you have completed this lab, use the following commands to validate your solution. The 'get pods' command will show whether the pod is running, and 'describe pod' will show its detailed configuration.

+

kubectl get pods -n web +kubectl describe pod nginx -n web

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab1/solution/index.html b/labs/kubernetes/lab1/solution/index.html new file mode 100644 index 0000000..4a5133c --- /dev/null +++ b/labs/kubernetes/lab1/solution/index.html @@ -0,0 +1,839 @@ + + + + + + + + + + + + + + + + + + + Kubernetes Lab 1 - Pod Creation - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Lab 1 - Pod Creation

+ +

Solution

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx
+  namespace: web
+spec:
+  containers:
+  - name: nginx
+    image: bitnami/nginx
+    command: ["nginx"]
+    args: ["-g", "daemon off;", "-q"]
+    ports:
+    - containerPort: 80
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab10/index.html b/labs/kubernetes/lab10/index.html new file mode 100644 index 0000000..41f3a24 --- /dev/null +++ b/labs/kubernetes/lab10/index.html @@ -0,0 +1,1643 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab 10 - Persistent Volumes - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 10 - Persistent Volumes

+ +

Problem

+

The death star plans can't be lost no matter what happens so we need to make sure we protect them at all costs.

+

In order to do that you will need to do the following:

+

Create a PersistentVolume:

+
    +
  • +

    The PersistentVolume should be named postgresql-pv.

    +
  • +
  • +

    The volume needs a capacity of 1Gi.

    +
  • +
  • +

    Use a storageClassName of localdisk.

    +
  • +
  • +

    Use the accessMode ReadWriteOnce.

    +
  • +
  • +

    Store the data locally on the node using a hostPath volume at the location /mnt/data.

    +
  • +
+

Create a PersistentVolumeClaim:

+
    +
  • +

    The PersistentVolumeClaim should be named postgresql-pv-claim.

    +
  • +
  • +

    Set a resource request on the claim for 500Mi of storage.

    +
  • +
  • +

    Use the same storageClassName and accessModes as the PersistentVolume so that this claim can bind to the PersistentVolume.

    +
  • +
+

Create a Postgresql Pod configured to use the PersistentVolumeClaim: +- The Pod should be named postgresql-pod.

+
    +
  • +

    Use the image bitnami/postgresql.

    +
  • +
  • +

    Expose the containerPort 5432.

    +
  • +
  • +

    Set an environment variable called MYSQL_ROOT_PASSWORD with the value password.

    +
  • +
  • +

    Add the PersistentVolumeClaim as a volume and mount it to the container at the path /bitnami/postgresql/.

    +
  • +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab10/solution/index.html b/labs/kubernetes/lab10/solution/index.html new file mode 100644 index 0000000..7a01937 --- /dev/null +++ b/labs/kubernetes/lab10/solution/index.html @@ -0,0 +1,872 @@ + + + + + + + + + + + + + + + + + + + Kubernetes Lab 10 - Persistent Volumes - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Lab 10 - Persistent Volumes

+ +

Solution

+
apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: postgresql-pv
+spec:
+  storageClassName: localdisk
+  capacity:
+    storage: 1Gi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: "/mnt/data"
+
+
apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: postgresql-pv-claim
+spec:
+  storageClassName: localdisk
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 500Mi
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: postgresql-pod
+spec:
+  containers:
+  - name: postgresql
+    image: bitnami/postgresql
+    ports:
+    - containerPort: 5432
+    env:
+    - name: MYSQL_ROOT_PASSWORD
+      value: password
+    volumeMounts:
+    - name: sql-storage
+      mountPath: /bitnami/postgresql/
+  volumes:
+  - name: sql-storage
+    persistentVolumeClaim:
+      claimName: postgresql-pv-claim
+
+

verify via ls /mnt/data on node

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab2/index.html b/labs/kubernetes/lab2/index.html new file mode 100644 index 0000000..4ca53a1 --- /dev/null +++ b/labs/kubernetes/lab2/index.html @@ -0,0 +1,1636 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab 2 - Pod Configuration - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 2 - Pod Configuration

+ +

Problem

+
    +
  • Create a pod definition named yoda-service-pod.yml, and then create a pod in the cluster using this definition to make sure it works.
  • +
+

The specifications are as follows:

+
    +
  • The current image for the container is bitnami/nginx. You do not need a custom command or args.
  • +
  • There is some configuration data the container will need:
      +
    • yoda.baby.power=100000000
    • +
    • yoda.strength=10
    • +
    +
  • +
  • It will expect to find this data in a file at /etc/yoda-service/yoda.cfg. Store the configuration data in a ConfigMap called yoda-service-config and provide it to the container as a mounted volume.
  • +
  • The container should expect to use 64Mi of memory and 250m CPU (use resource requests).
  • +
  • The container should be limited to 128Mi of memory and 500m CPU (use resource limits).
  • +
  • The container needs access to a database password in order to authenticate with a backend database server. The password is 0penSh1ftRul3s!. It should be stored as a Kubernetes secret called yoda-db-password and passed to the container as an environment variable called DB_PASSWORD.
  • +
  • The container will need to access the Kubernetes API using the ServiceAccount yoda-svc. Create the service account if it doesn't already exist, and configure the pod to use it.
  • +
+

Verification

+

To verify your setup is complete, check /etc/yoda-service for the yoda.cfg file and use the cat command to check its contents.

+
kubectl exec -it yoda-service /bin/bash
+cd /etc/yoda-service
+cat yoda.cfg
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab2/solution/index.html b/labs/kubernetes/lab2/solution/index.html new file mode 100644 index 0000000..6541614 --- /dev/null +++ b/labs/kubernetes/lab2/solution/index.html @@ -0,0 +1,876 @@ + + + + + + + + + + + + + + + + + + + Kubernetes Lab 2 - Pod Configuration - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Lab 2 - Pod Configuration

+ +

Solution

+
apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: yoda-service-config
+data:
+  yoda.cfg: |-
+    yoda.baby.power=100000000
+    yoda.strength=10
+
+
apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: yoda-svc
+
+
apiVersion: v1
+kind: Secret
+metadata:
+  name: yoda-db-password
+stringData:
+  password: 0penSh1ftRul3s!
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: yoda-service
+spec:
+  serviceAccountName: yoda-svc
+  containers:
+  - name: yoda-service
+    image: bitnami/nginx
+    volumeMounts:
+      - name: config-volume
+        mountPath: /etc/yoda-service
+    env:
+    - name: DB_PASSWORD
+      valueFrom:
+        secretKeyRef:
+          name: yoda-db-password
+          key: password
+    resources:
+      requests:
+        memory: "64Mi"
+        cpu: "250m"
+      limits:
+        memory: "128Mi"
+        cpu: "500m"
+  volumes:
+  - name: config-volume
+    configMap:
+      name: yoda-service-config
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab3/index.html b/labs/kubernetes/lab3/index.html new file mode 100644 index 0000000..bd8cd5b --- /dev/null +++ b/labs/kubernetes/lab3/index.html @@ -0,0 +1,1650 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab 3 - Manage Multiple Containers - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 3 - Multiple Containers

+ +

Problem

+

This service has already been packaged into a container image, but there is one special requirement: + - The legacy app is hard-coded to only serve content on port 8989, but the team wants to be able to access the service using the standard port 80.

+

Your task is to build a Kubernetes pod that runs this legacy container and uses the ambassador design pattern to expose access to the service on port 80.

+

This setup will need to meet the following specifications:

+
    +
  • The pod should have the name vader-service.
  • +
  • The vader-service pod should have a container that runs the legacy vader service image: ibmcase/millennium-falcon:1.
  • +
  • The vader-service pod should have an ambassador container that runs the haproxy:1.7 image and proxies incoming traffic on port 80 to the legacy service on port 8989 (the HAProxy configuration for this is provided below).
  • +
  • Port 80 should be exposed as a containerPort.
  • +
+

+

Note: You do not need to expose port 8989

+

+
    +
  • The HAProxy configuration should be stored in a ConfigMap called vader-service-ambassador-config.
  • +
  • The HAProxy config should be provided to the ambassador container using a volume mount that places the data from the ConfigMap in a file at /usr/local/etc/haproxy/haproxy.cfg. +haproxy.cfg should contain the following configuration data:
  • +
+
global
+    daemon
+    maxconn 256
+
+defaults
+    mode http
+    timeout connect 5000ms
+    timeout client 50000ms
+    timeout server 50000ms
+
+listen http-in
+    bind *:80
+    server server1 127.0.0.1:8989 maxconn 32
+
+

Once your pod is up and running, it's a good idea to test it to make sure you can access the service from within the cluster using port 80. In order to do this, you can create a busybox pod in the cluster, and then run a command to attempt to access the service from within the busybox pod.

+

Create a descriptor for the busybox pod called busybox.yml

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox
+spec:
+  containers:
+  - name: myapp-container
+    image: radial/busyboxplus:curl
+    command: ['sh', '-c', 'while true; do sleep 3600; done']
+
+

Create the busybox testing pod. +

kubectl apply -f busybox.yml
+

+

Use this command to access vader-service using port 80 from within the busybox pod. +

kubectl exec busybox -- curl $(kubectl get pod vader-service -o=custom-columns=IP:.status.podIP --no-headers):80
+

+

If the service is working, you should get a message that the hyper drive of the millennium falcon needs repair.

+

Relevant Documentation: +- Kubernetes Sidecar Logging Agent +- Shared Volumes +- Distributed System Toolkit Patterns

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab3/solution/index.html b/labs/kubernetes/lab3/solution/index.html new file mode 100644 index 0000000..b5d07b9 --- /dev/null +++ b/labs/kubernetes/lab3/solution/index.html @@ -0,0 +1,877 @@ + + + + + + + + + + + + + + + + + + + Kubernetes Lab 3 - Manage Multiple Containers - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Lab 3 - Manage Multiple Containers

+ +

Solution

+
apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vader-service-ambassador-config
+data:
+  haproxy.cfg: |-
+    global
+        daemon
+        maxconn 256
+
+    defaults
+        mode http
+        timeout connect 5000ms
+        timeout client 50000ms
+        timeout server 50000ms
+
+    listen http-in
+        bind *:80
+        server server1 127.0.0.1:8775 maxconn 32
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: vader-service
+spec:
+  containers:
+  - name: millennium-falcon
+    image: ibmcase/millennium-falcon:1
+  - name: haproxy-ambassador
+    image: haproxy:1.7
+    ports:
+    - containerPort: 80
+    volumeMounts:
+    - name: config-volume
+      mountPath: /usr/local/etc/haproxy
+  volumes:
+  - name: config-volume
+    configMap:
+      name: vader-service-ambassador-config
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox
+spec:
+  containers:
+  - name: myapp-container
+    image: radial/busyboxplus:curl
+    command: ['sh', '-c', 'while true; do sleep 3600; done']
+
+
kubectl exec busybox -- curl $(kubectl get pod vader-service -o=jsonpath='{.status.podIP}'):80
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab4/index.html b/labs/kubernetes/lab4/index.html new file mode 100644 index 0000000..e04b2a0 --- /dev/null +++ b/labs/kubernetes/lab4/index.html @@ -0,0 +1,1638 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab 4 - Probes - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 4 - Probes

+ +

Container Health Issues

+

The first issue is caused by application instances entering an unhealthy state and responding to user requests with error messages. Unfortunately, this state does not cause the container to stop, so the Kubernetes cluster is not able to detect this state and restart the container. Luckily, the application has an internal endpoint that can be used to detect whether or not it is healthy. This endpoint is /healthz on port 8080.

+
    +
  • Your first task will be to create a probe to check this endpoint periodically.
  • +
  • If the endpoint returns an error or fails to respond, the probe will detect this and the cluster will restart the container.
  • +
+

Container Startup Issues

+

Another issue is caused by new pods when they are starting up. The application takes a few seconds after startup before it is ready to service requests. As a result, some users are getting error messages during this brief time.

+
    +
  • +

    To fix this, you will need to create another probe. To detect whether the application is ready, the probe should simply make a request to the /ready endpoint on port 8080. If this request succeeds, then the application is ready.

    +
  • +
  • +

    Also set an initial delay of 5 seconds for the probes.

    +
  • +
+

Here is the Pod yaml file, add the probes, then create the pod in the cluster to test it.

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: energy-shield-service
+spec:
+  containers:
+  - name: energy-shield
+    image: ibmcase/energy-shield:1
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab4/solution/index.html b/labs/kubernetes/lab4/solution/index.html new file mode 100644 index 0000000..6023f18 --- /dev/null +++ b/labs/kubernetes/lab4/solution/index.html @@ -0,0 +1,843 @@ + + + + + + + + + + + + + + + + + + + Kubernetes Lab 4 - Probes - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Lab 4 - Probes

+ +

Solution

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: energy-shield-service
+spec:
+  containers:
+  - name: energy-shield
+    image: ibmcase/energy-shield:1
+    livenessProbe:
+      httpGet:
+        path: /healthz
+        port: 8080
+    readinessProbe:
+      httpGet:
+        path: /ready
+        port: 8080
+      initialDelaySeconds: 5
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab5/index.html b/labs/kubernetes/lab5/index.html new file mode 100644 index 0000000..d5e7ada --- /dev/null +++ b/labs/kubernetes/lab5/index.html @@ -0,0 +1,1634 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab 5 - Debugging - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 5 - Debugging

+ +

Problem

+

The Hyper Drive isn't working and we need to find out why. Let's debug the hyper-drive deployment so that we can reach light speed again.

+

Here are some tips to help you solve the Hyper Drive:

+
    +
  • Check the description of the deployment.
  • +
  • Get and save the logs of one of the broken pods.
  • +
  • Are the correct ports assigned.
  • +
  • Make sure your labels and selectors are correct.
  • +
  • Check to see if the Probes are correctly working.
  • +
  • To fix the deployment, save then modify the yaml file for redeployment.
  • +
+

Reset the environment: +

minikube delete
+minikube start
+

+

Setup the environment: +

kubectl apply -f https://raw.githubusercontent.com/ibm-cloud-architecture/learning-cloudnative-101/master/lab-setup/lab-5-debug-k8s-setup.yaml
+

+

Validate

+

Once you get the Hyper Drive working again. Verify it by checking the endpoints.

+
kubectl get ep hyper-drive
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab5/solution/index.html b/labs/kubernetes/lab5/solution/index.html new file mode 100644 index 0000000..57f58be --- /dev/null +++ b/labs/kubernetes/lab5/solution/index.html @@ -0,0 +1,851 @@ + + + + + + + + + + + + + + + + + + + Kubernetes Lab 5 - Debugging - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Lab 5 - Debugging

+ +

Solution

+

Check the STATUS column for pods that are not Ready +

    kubectl get pods --all-namespaces
+

+

Check the description of the deployment +

kubectl describe deployment hyper-drive
+
+ Save logs for a broken pod

+
kubectl logs <pod name> -n <namespace> > /home/cloud_user/debug/broken-pod-logs.log
+
+

In the description you will see the following is wrong: +- Selector and Label names do not match. +- The Probe is TCP instead of HTTP Get. +- The Service Port is 80 instead of 8080.

+

To fix probe, can't kubectl edit, need to delete and recreate the deployment +

kubectl get deployment <deployment name> -n <namespace> -o yaml --export > hyper-drive.yml
+

+

Delete pod +

kubectl delete deployment <deployment name> -n <namespace>
+
+ Can also use kubectl replace

+

Edit yaml, and apply +

kubectl apply -f hyper-drive.yml -n <namespace>
+

+

Verify +

kubectl get deployment <deployment name> -n <namespace>
+

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab6/index.html b/labs/kubernetes/lab6/index.html new file mode 100644 index 0000000..a74dd90 --- /dev/null +++ b/labs/kubernetes/lab6/index.html @@ -0,0 +1,1600 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab 6 - Rolling Updates - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 6 - Rolling Updates

+ +

Problem

+

Your company's developers have just finished developing a new version of their jedi-themed mobile game. They are ready to update the backend services that are running in your Kubernetes cluster. There is a deployment in the cluster managing the replicas for this application. The deployment is called jedi-deployment. You have been asked to update the image for the container named jedi-ws in this deployment template to a new version, bitnamy/nginx:1.18.1.

+

After you have updated the image using a rolling update, check on the status of the update to make sure it is working. If it is not working, perform a rollback to the previous state.

+

Setup environment +

kubectl apply -f https://gist.githubusercontent.com/csantanapr/87df4292e94441617707dae5de488cf4/raw/cb515f7bae77a3f0e76fdc7f6aa0f4e89cc5fec7/lab-6-rolling-updates-setup.yaml
+

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab6/solution/index.html b/labs/kubernetes/lab6/solution/index.html new file mode 100644 index 0000000..e558a7d --- /dev/null +++ b/labs/kubernetes/lab6/solution/index.html @@ -0,0 +1,843 @@ + + + + + + + + + + + + + + + + + + + Kubernetes Lab 6 - Rolling Updates - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Lab 6 - Rolling Updates

+ +

Solution

+

Update the deployment to the new version like so: +

kubectl set image deployment/jedi-deployment jedi-ws=bitnamy/nginx:1.18.1 --record
+

+

Check the progress of the rolling update: +

kubectl rollout status deployment/jedi-deployment
+

+

In another terminal window +

kubectl get pods -w
+

+

Get a list of previous revisions. +

kubectl rollout history deployment/jedi-deployment
+

+

Undo the last revision. +

kubectl rollout undo deployment/jedi-deployment
+

+

Check the status of the rollout. +

kubectl rollout status deployment/jedi-deployment
+

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab7/index.html b/labs/kubernetes/lab7/index.html new file mode 100644 index 0000000..36e4e77 --- /dev/null +++ b/labs/kubernetes/lab7/index.html @@ -0,0 +1,1631 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab 7 - Cron Jobs - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 7 - Cron Jobs

+ +

Problem

+

Your commander has a simple data process that is run periodically to check status. They would like to stop doing this manually in order to save time, so you have been asked to implement a cron job in the Kubernetes cluster to run this process. + - Create a cron job called xwing-cronjob using the ibmcase/xwing-status:1.0 image. + - Have the job run every second minute with the following cron expression: */2 * * * *. + - Pass the argument /usr/sbin/xwing-status.sh to the container.

+

Verification

+
    +
  • Run kubectl get cronjobs.batch and check the LAST-SCHEDULE column to see the last time the job ran
  • +
  • From a bash shell, run the following to see the logs for all jobs:
  • +
+
jobs=( $(kubectl get jobs --no-headers -o custom-columns=":metadata.name") )
+echo -e "Job \t\t\t\t Pod \t\t\t\t\tLog"
+for job in "${jobs[@]}"
+do
+   pod=$(kubectl get pods -l job-name=$job --no-headers -o custom-columns=":metadata.name")
+   echo -en "$job \t $pod \t"
+   kubectl logs $pod
+done
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab7/solution/index.html b/labs/kubernetes/lab7/solution/index.html new file mode 100644 index 0000000..27c2de7 --- /dev/null +++ b/labs/kubernetes/lab7/solution/index.html @@ -0,0 +1,844 @@ + + + + + + + + + + + + + + + + + + + Kubernetes Lab 7 - Cron Jobs - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Lab 7 - Cron Jobs

+ +

Solution

+
apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+  name: xwing-cronjob
+spec:
+  schedule: "*/2 * * * *"
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          containers:
+          - name: xwing-status
+            image: ibmcase/xwing-status:1.0
+            args:
+            - /usr/sbin/xwing-status.sh
+          restartPolicy: OnFailure
+
+
kubectl get cronjob xwing-cronjob
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab8/index.html b/labs/kubernetes/lab8/index.html new file mode 100644 index 0000000..f138734 --- /dev/null +++ b/labs/kubernetes/lab8/index.html @@ -0,0 +1,1661 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab 8 - Services - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 8 - Creating Services

+ +

Problem

+

We have a jedi-deployment and yoda-deployment that need to communicate with others. The jedi needs to talk to the world (outside the cluster), while yoda only needs to talk to the jedi council (others in the cluster).

+

Your Task

+
    +
  • Examine the two deployments, and create two services that meet the following criteria:
  • +
+

jedi-svc + - The service name is jedi-svc. + - The service exposes the pod replicas managed by the deployment named jedi-deployment. + - The service listens on port 80 and its targetPort matches the port exposed by the pods. + - The service type is NodePort.

+

yoda-svc + - The service name is yoda-svc. + - The service exposes the pod replicas managed by the deployment named yoda-deployment. + - The service listens on port 80 and its targetPort matches the port exposed by the pods. + - The service type is ClusterIP.

+

Setup environment:

+
kubectl apply -f https://gist.githubusercontent.com/csantanapr/87df4292e94441617707dae5de488cf4/raw/cb515f7bae77a3f0e76fdc7f6aa0f4e89cc5fec7/lab-8-service-setup.yaml
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab8/solution/index.html b/labs/kubernetes/lab8/solution/index.html new file mode 100644 index 0000000..66560a3 --- /dev/null +++ b/labs/kubernetes/lab8/solution/index.html @@ -0,0 +1,851 @@ + + + + + + + + + + + + + + + + + + + Kubernetes Lab 8 - Services - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Lab 8 - Services

+ +

Solution

+
apiVersion: v1
+kind: Service
+metadata:
+  name: jedi-svc
+spec:
+  type: NodePort
+  selector:
+    app: jedi
+  ports:
+  - protocol: TCP
+    port: 80
+    targetPort: 8080
+
+
apiVersion: v1
+kind: Service
+metadata:
+  name: yoda-svc
+spec:
+  type: ClusterIP
+  selector:
+    app: yoda
+  ports:
+  - protocol: TCP
+    port: 80
+    targetPort: 8080
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab9/index.html b/labs/kubernetes/lab9/index.html new file mode 100644 index 0000000..1bb7755 --- /dev/null +++ b/labs/kubernetes/lab9/index.html @@ -0,0 +1,1627 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes Lab 9 - Network Policies - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Lab 9 - Network Policies

+ +

Problem

+

Setup minikube

+
minikube start --network-plugin=cni
+kubectl apply -f https://docs.projectcalico.org/v3.9/manifests/calico.yaml
+kubectl -n kube-system set env daemonset/calico-node FELIX_IGNORELOOSERPF=true
+kubectl -n kube-system get pods | grep calico-node
+
+

Create secured pod +

apiVersion: v1
+kind: Pod
+metadata:
+  name: network-policy-secure-pod
+  labels:
+    app: secure-app
+spec:
+  containers:
+  - name: nginx
+    image: bitnami/nginx
+    ports:
+    - containerPort: 8080
+

+

Create client pod +

apiVersion: v1
+kind: Pod
+metadata:
+  name: network-policy-client-pod
+spec:
+  containers:
+  - name: busybox
+    image: radial/busyboxplus:curl
+    command: ["/bin/sh", "-c", "while true; do sleep 3600; done"]
+

+

Create a policy to allow only client pods with label allow-access: "true" to access secure pod

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/labs/kubernetes/lab9/solution/index.html b/labs/kubernetes/lab9/solution/index.html new file mode 100644 index 0000000..52b24c9 --- /dev/null +++ b/labs/kubernetes/lab9/solution/index.html @@ -0,0 +1,841 @@ + + + + + + + + + + + + + + + + + + + Kubernetes Lab 9 - Network Policies - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes Lab 9 - Network Policies

+ +

Solution

+
apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: my-network-policy
+spec:
+  podSelector:
+    matchLabels:
+      app: secure-app
+  policyTypes:
+  - Ingress
+  ingress:
+  - from:
+    - podSelector:
+        matchLabels:
+          allow-access: "true"
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/configuration/config-map/index.html b/openshift/configuration/config-map/index.html new file mode 100644 index 0000000..b92c09c --- /dev/null +++ b/openshift/configuration/config-map/index.html @@ -0,0 +1,1879 @@ + + + + + + + + + + + + + + + + + + + + + + + Config Maps - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Config Maps

+

ConfigMaps allow you to decouple configuration artifacts from image content to keep containerized applications portable.

+

You can use data from a ConfigMap in 3 different ways. +- As a single environment variable specific to a single key +- As a set of environment variables from all keys +- As a set of files, each key represented by a file on a mounted volume

+

Resources

+
+ +
+

References

+
apiVersion: v1
+kind: ConfigMap
+metadata:
+   name: my-cm
+data:
+   color: blue
+   location: naboo
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  restartPolicy: Never
+  containers:
+    - name: myapp
+      image: busybox
+      command: ["echo"]
+      args: ["color is $(MY_VAR)"]
+      env:
+        - name: MY_VAR
+          valueFrom:
+            configMapKeyRef:
+              name: my-cm
+              key: color
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  restartPolicy: Never
+  containers:
+    - name: myapp
+      image: busybox
+      command:
+        [
+          "sh",
+          "-c",
+          "ls -l /etc/config; echo located at $(cat /etc/config/location)",
+        ]
+      volumeMounts:
+        - name: config-volume
+          mountPath: /etc/config
+  volumes:
+    - name: config-volume
+      configMap:
+        name: my-cm
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  restartPolicy: Never
+  containers:
+    - name: myapp
+      image: busybox
+      command: ["/bin/sh", "-c", "env | sort"]
+      envFrom:
+        - configMapRef:
+            name: my-cm
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/configuration/index.html b/openshift/configuration/index.html new file mode 100644 index 0000000..dc907c8 --- /dev/null +++ b/openshift/configuration/index.html @@ -0,0 +1,1995 @@ + + + + + + + + + + + + + + + + + + + + + + + Container Configuration - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Container Configuration

+

Command and Argument

+

When you create a Pod, you can define a command and arguments for the containers that run in the Pod.

+

The command and arguments that you define in the configuration file override the default command and arguments provided by the container image

+

Dockerfile vs Kubernetes +Dockerfile Entrypoint -> k8s command +Dockerfile CMD -> k8s args

+

Ports

+

When you create a Pod, you can specify the port number the container exposes; as a best practice it is good to give the port a name, so that a service can specify its targetPort by name reference.

+

Environment Variable

+

When you create a Pod, you can set environment variables for the containers that run in the Pod. To set environment variables, include the env or envFrom field in the container configuration

+

A Pod can use environment variables to expose information about itself to Containers running in the Pod. Environment variables can expose Pod fields and Container fields

+

Resources

+ +

References

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-cmd-pod
+spec:
+  containers:
+  - name: myapp-container
+    image: busybox
+    command: ['echo']
+  restartPolicy: Never
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-arg-pod
+spec:
+  containers:
+  - name: myapp-container
+    image: busybox
+    command: ['echo']
+    args: ['Hello World']
+  restartPolicy: Never
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-port-pod
+spec:
+  containers:
+  - name: myapp-container
+    image: bitnami/nginx
+    ports:
+    - containerPort: 8080
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-env-pod
+spec:
+  restartPolicy: Never
+  containers:
+  - name: c
+    image: busybox
+    env:
+    - name: DEMO_GREETING
+      value: "Hello from the environment"
+    command: ["echo"]
+    args: ["$(DEMO_GREETING)"]
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-inter-pod
+  labels:
+    app: jedi
+spec:
+  restartPolicy: Never
+  containers:
+    - name: myapp
+      image: bitnami/nginx
+      ports:
+        - containerPort: 8080
+          name: http
+      env:
+        - name: MY_NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: MY_POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: MY_POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+      command: ["echo"]
+      args: ["$(MY_NODE_NAME) $(MY_POD_NAME) $(MY_POD_IP)"]
+
+

Resource Requirements

+

When you specify a Pod, you can optionally specify how much CPU and memory (RAM) each Container needs. When Containers have resource requests specified, the scheduler can make better decisions about which nodes to place Pods on.

+

CPU and memory are each a resource type. A resource type has a base unit. CPU is specified in units of cores, and memory is specified in units of bytes.

+

Resources

+
+ +
+

References

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  containers:
+  - name: my-app
+    image: bitnami/nginx
+    ports:
+      - containerPort: 8080
+    resources:
+      requests:
+        memory: "64Mi"
+        cpu: "250m"
+      limits:
+        memory: "128Mi"
+        cpu: "500m"
+
+

Namespaced defaults mem +

apiVersion: v1
+kind: LimitRange
+metadata:
+  name: mem-limit-range
+spec:
+  limits:
+  - default:
+      memory: 512Mi
+    defaultRequest:
+      memory: 256Mi
+    type: Container
+

+

Namespaced defaults mem +

apiVersion: v1
+kind: LimitRange
+metadata:
+  name: cpu-limit-range
+spec:
+  limits:
+  - default:
+      cpu: 1
+    defaultRequest:
+      cpu: 0.5
+    type: Container
+

+

Activities

+ + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
*** Try It Yourself ***
Pod ConfigurationConfigure a pod to meet compute resource requirements.Pod Configuration
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/configuration/secrets/index.html b/openshift/configuration/secrets/index.html new file mode 100644 index 0000000..b7d7446 --- /dev/null +++ b/openshift/configuration/secrets/index.html @@ -0,0 +1,1913 @@ + + + + + + + + + + + + + + + + + + + + + + + Secrets - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Secrets

+

Kubernetes secret objects let you store and manage sensitive information, such as passwords, OAuth tokens, and ssh keys. Putting this information in a secret is safer and more flexible than putting it verbatim in a Pod definition or in a container image.

+

A Secret is an object that contains a small amount of sensitive data such as a password, a token, or a key. Such information might otherwise be put in a Pod specification or in an image; putting it in a Secret object allows for more control over how it is used, and reduces the risk of accidental exposure.

+

Resources

+
+
+
+
+
    +
  • +

    Image Pull Secrets

    +
    +

    Install mkdocs-material with pip and get up + and running in minutes

    +

    Getting started

    +
  • +
  • +

    It's just Markdown

    +
    +

    Focus on your content and generate a responsive and searchable static site

    +

    Reference

    +
  • +
+
+

Image Pull Secrets

+

Secret Commands

+
+ +
+
+

References

+
apiVersion: v1
+kind: Secret
+metadata:
+  name: mysecret
+type: Opaque
+data:
+  username: YWRtaW4=
+stringData:
+  admin: administrator
+
+
apiVersion: v1
+kind: Secret
+metadata:
+  name: mysecret-config
+type: Opaque
+stringData:
+  config.yaml: |-
+    apiUrl: "https://my.api.com/api/v1"
+    username: token
+    password: thesecrettoken
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  containers:
+  - name: my-app
+    image: bitnami/nginx
+    ports:
+      - containerPort: 8080
+    env:
+      - name: SECRET_USERNAME
+        valueFrom:
+          secretKeyRef:
+            name: mysecret
+            key: username
+    envFrom:
+      - secretRef:
+          name: mysecret
+    volumeMounts:
+      - name: config
+        mountPath: "/etc/secrets"
+  volumes:
+    - name: config
+      secret:
+        secretName: mysecret-config
+
+
+
+
+

Create files needed for rest of example. +

echo -n 'admin' > ./username.txt
+echo -n '1f2d1e2e67df' > ./password.txt
+
+Creating Secret from files. +
oc create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt
+
+Getting Secret +
oc get secrets
+
+Gets the Secret's Description. +
oc describe secrets/db-user-pass
+

+
+
+

Create files needed for rest of example. +

echo -n 'admin' > ./username.txt
+echo -n '1f2d1e2e67df' > ./password.txt
+
+Creates the Secret from the files +
kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt
+
+
Gets the Secret +
kubectl get secrets
+
+
Gets the Secret's Description. +
kubectl describe secrets/db-user-pass
+

+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/configuration/security-contexts/index.html b/openshift/configuration/security-contexts/index.html new file mode 100644 index 0000000..ad121e6 --- /dev/null +++ b/openshift/configuration/security-contexts/index.html @@ -0,0 +1,1889 @@ + + + + + + + + + + + + + + + + + + + + + + + Security Contexts - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Security Contexts

+

A security context defines privilege and access control settings for a Pod or Container.

+

To specify security settings for a Pod, include the securityContext field in the Pod specification. The securityContext field is a PodSecurityContext object. The security settings that you specify for a Pod apply to all Containers in the Pod.

+

Resources

+
+ +
+

References

+

Setup minikube VM with users +

minikube ssh
+
+
su -
+
+
echo "container-user-0:x:2000:2000:-:/home/container-user-0:/bin/bash" >> /etc/passwd
+echo "container-user-1:x:2001:2001:-:/home/container-user-1:/bin/bash" >> /etc/passwd
+echo "container-group-0:x:3000:" >>/etc/group
+echo "container-group-1:x:3001:" >>/etc/group
+mkdir -p /etc/message/
+echo "Hello, World!" | sudo tee -a /etc/message/message.txt
+chown 2000:3000 /etc/message/message.txt
+chmod 640 /etc/message/message.txt
+

+

Using the this securityContext the container will be able to read the file /message/message.txt +

apiVersion: v1
+kind: Pod
+metadata:
+  name: my-securitycontext-pod
+spec:
+  restartPolicy: Never
+  securityContext:
+    runAsUser: 2000
+    runAsGroup: 3000
+    fsGroup: 3000
+  containers:
+    - name: myapp-container
+      image: busybox
+      command: ["sh", "-c", "cat /message/message.txt && sleep 3600"]
+      volumeMounts:
+        - name: message-volume
+          mountPath: /message
+  volumes:
+    - name: message-volume
+      hostPath:
+        path: /etc/message
+

+

Using the this securityContext the container should NOT be able to read the file /message/message.txt +

apiVersion: v1
+kind: Pod
+metadata:
+  name: my-securitycontext-pod
+spec:
+  restartPolicy: Never
+  securityContext:
+    runAsUser: 2001
+    runAsGroup: 3001
+    fsGroup: 3001
+  containers:
+    - name: myapp-container
+      image: busybox
+      command: ["sh", "-c", "cat /message/message.txt && sleep 3600"]
+      volumeMounts:
+        - name: message-volume
+          mountPath: /message
+  volumes:
+    - name: message-volume
+      hostPath:
+        path: /etc/message
+
+Run to see the errors

+
+
+
+
Get Pod Logs
oc logs my-securitycontext-pod
+
+
Should return
cat: can't open '/message/message.txt': Permission denied
+
+
+
+
Get Pod Logs
kubectl logs my-securitycontext-pod
+
+
Should return
cat: can't open '/message/message.txt': Permission denied
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/configuration/service-accounts/index.html b/openshift/configuration/service-accounts/index.html new file mode 100644 index 0000000..4824c03 --- /dev/null +++ b/openshift/configuration/service-accounts/index.html @@ -0,0 +1,1857 @@ + + + + + + + + + + + + + + + + + + + + + + + Service Accounts - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Service Accounts

+

A service account provides an identity for processes that run in a Pod.

+

When you (a human) access the cluster (for example, using kubectl), you are authenticated by the apiserver as a particular User Account (currently this is usually admin, unless your cluster administrator has customized your cluster). Processes in containers inside pods can also contact the apiserver. When they do, they are authenticated as a particular Service Account (for example, default).

+

User accounts are for humans. Service accounts are for processes, which run in pods.

+

User accounts are intended to be global. Names must be unique across all namespaces of a cluster, future user resource will not be namespaced. Service accounts are namespaced.

+

Resources

+ +

References

+
apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: my-service-account
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  serviceAccountName: my-service-account
+  containers:
+  - name: my-app
+    image: bitnami/nginx
+    ports:
+      - containerPort: 8080
+
+
apiVersion: v1
+kind: Secret
+metadata:
+  name: build-robot-secret
+  annotations:
+    kubernetes.io/service-account.name: my-service-account
+type: kubernetes.io/service-account-token
+
+
+
+
+
Create a Service Account
oc create sa <service_account_name>
+
+
View Service Account Details
oc describe sa <service_account_name>
+
+
+
+
Create a Service Account
kubectl create sa <service_account_name>
+
+
View Service Account Details
kubectl describe sa <service_account_name>
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/core-concepts/index.html b/openshift/core-concepts/index.html new file mode 100644 index 0000000..ecdac33 --- /dev/null +++ b/openshift/core-concepts/index.html @@ -0,0 +1,1837 @@ + + + + + + + + + + + + + + + + + + + + + + + API Primatives - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes API Primitives

+

Kubernetes API primitive, also known as Kubernetes objects, are the basic building blocks of any application running in Kubernetes

+

Examples:

+
    +
  • Pod
  • +
  • Node
  • +
  • Service
  • +
  • ServiceAccount
  • +
+

Two primary members

+
    +
  • Spec, desired state
  • +
  • Status, current state
  • +
+

Resources

+
+ +
+

References

+
+
+
+
List API-Resources
oc api-resources
+
+
+
+
List API-Resources
kubectl api-resources
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/core-concepts/namespaces-projects/index.html b/openshift/core-concepts/namespaces-projects/index.html new file mode 100644 index 0000000..1e42377 --- /dev/null +++ b/openshift/core-concepts/namespaces-projects/index.html @@ -0,0 +1,1856 @@ + + + + + + + + + + + + + + + + + + + + + + + Namespaces/Projects - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Projects/Namespaces

+

Namespaces are intended for use in environments with many users spread across multiple teams, or projects.

+

Namespaces provide a scope for names. Names of resources need to be unique within a namespace, but not across namespaces.

+

Namespaces are a way to divide cluster resources between multiple users (via resource quota).

+

It is not necessary to use multiple namespaces just to separate slightly different resources, such as different versions of the same software: use labels to distinguish resources within the same namespace. In practice namespaces are used to deploy different versions based on stages of the CICD pipeline (dev, test, stage, prod)

+

Resources

+ +

References

+
Namespace YAML
apiVersion: v1
+kind: Namespace
+metadata:
+  name: dev
+
+
Pod YAML specifiying Namespace
apiVersion: v1
+kind: Pod
+metadata:
+  name: myapp-pod
+  namespace: dev
+spec:
+  containers:
+    - name: myapp-container
+      image: busybox
+      command: ["sh", "-c", "echo Hello Kubernetes! && sleep 3600"]
+
+
+
+
+
Getting all namespaces/projects
oc projects
+
+
Create a new Project
oc new-project dev
+
+
Viewing Current Project
oc project
+
+
Setting Namespace in Context
oc project dev
+
+
Viewing Project Status
oc status
+
+
+
+
Getting all namespaces
kubectl get namespaces
+
+
Create a new namespace called dev
kubectl create ns dev
+
+
Setting Namespace in Context
kubectl config set-context --current --namespace=dev
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/deployments/index.html b/openshift/deployments/index.html new file mode 100644 index 0000000..743313b --- /dev/null +++ b/openshift/deployments/index.html @@ -0,0 +1,1851 @@ + + + + + + + + + + + + + + + + + + + + + + + Deployments - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Deployments

+

A Deployment provides declarative updates for Pods and ReplicaSets.

+

You describe a desired state in a Deployment, and the Deployment Controller changes the actual state to the desired state at a controlled rate. You can define Deployments to create new ReplicaSets, or to remove existing Deployments and adopt all their resources with new Deployments.

+

The following are typical use cases for Deployments: +- Create a Deployment to rollout a ReplicaSet. The ReplicaSet creates Pods in the background. Check the status of the rollout to see if it succeeds or not. +- Declare the new state of the Pods by updating the PodTemplateSpec of the Deployment. A new ReplicaSet is created and the Deployment manages moving the Pods from the old ReplicaSet to the new one at a controlled rate. Each new ReplicaSet updates the revision of the Deployment. +- Rollback to an earlier Deployment revision if the current state of the Deployment is not stable. Each rollback updates the revision of the Deployment. +- Scale up the Deployment to facilitate more load. +- Pause the Deployment to apply multiple fixes to its PodTemplateSpec and then resume it to start a new rollout. +- Use the status of the Deployment as an indicator that a rollout has stuck. +- Clean up older ReplicaSets that you don’t need anymore.

+

Resources

+ +

References

+
apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-deployment
+  labels:
+    app: nginx
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: bitnami/nginx:1.16.0
+        ports:
+        - containerPort: 8080
+
+
+
+
+
Create a Deployment
oc apply -f deployment.yaml
+
+
Get Deployment
oc get deployment my-deployment
+
+
Get Deployment's Description
oc describe deployment my-deployment
+
+
Edit Deployment
oc edit deployment my-deployment
+
+
Scale Deployment
oc scale deployment/my-deployment --replicas=3
+
+
Delete Deployment
oc delete deployment my-deployment
+
+
+
+
Create a Deployment
kubectl apply -f deployment.yaml
+
+
Get Deployment
kubectl get deployment my-deployment
+
+
Get Deployment's Description
kubectl describe deployment my-deployment
+
+
Edit Deployment
kubectl edit deployment my-deployment
+
+
Scale Deployment
kubectl scale deployment/my-deployment --replicas=3
+
+
Delete Deployment
kubectl delete deployment my-deployment
+
+
+
+
+

Activities

+ + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
*** Try It Yourself ***
Rolling Updates LabCreate a Rolling Update for your applicationRolling Updates
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/deployments/updates/index.html b/openshift/deployments/updates/index.html new file mode 100644 index 0000000..3f6b4e3 --- /dev/null +++ b/openshift/deployments/updates/index.html @@ -0,0 +1,1872 @@ + + + + + + + + + + + + + + + + + + + + + + + Rolling Updates - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Rolling Updates and Rollbacks

+

Updating a Deployment +A Deployment’s rollout is triggered if and only if the Deployment’s Pod template (that is, .spec.template) is changed, for example if the labels or container images of the template are updated. Other updates, such as scaling the Deployment, do not trigger a rollout.

+

Each time a new Deployment is observed by the Deployment controller, a ReplicaSet is created to bring up the desired Pods. If the Deployment is updated, the existing ReplicaSet that controls Pods whose labels match .spec.selector but whose template does not match .spec.template are scaled down. Eventually, the new ReplicaSet is scaled to .spec.replicas and all old ReplicaSets is scaled to 0.

+

Label selector updates +It is generally discouraged to make label selector updates and it is suggested to plan your selectors up front. In any case, if you need to perform a label selector update, exercise great caution and make sure you have grasped all of the implications.

+

Rolling Back a Deployment +Sometimes, you may want to rollback a Deployment; for example, when the Deployment is not stable, such as crash looping. By default, all of the Deployment’s rollout history is kept in the system so that you can rollback anytime you want (you can change that by modifying revision history limit).

+

A Deployment’s revision is created when a Deployment’s rollout is triggered. This means that the new revision is created if and only if the Deployment’s Pod template (.spec.template) is changed, for example if you update the labels or container images of the template. Other updates, such as scaling the Deployment, do not create a Deployment revision, so that you can facilitate simultaneous manual- or auto-scaling. This means that when you roll back to an earlier revision, only the Deployment’s Pod template part is rolled back.

+

Resources

+ +

References

+
apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-deployment
+  labels:
+    app: nginx
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: bitnami/nginx:1.16.0
+        ports:
+        - containerPort: 8080
+
+
+
+
+
Get Deployments
oc get deployments
+
+
Sets new image for Deployment
oc set image deployment/my-deployment nginx=bitnami/nginx:1.16.1 --record
+
+
Check the status of a rollout
oc rollout status deployment my-deployment
+
+
Get Replicasets
oc get rs
+
+
Get Deployment Description
oc describe deployment my-deployment
+
+
Get Rollout History
oc rollout history deployment my-deployment
+
+
Undo Rollout
oc rollout undo deployment/my-deployment
+
+
Delete Deployment
oc delete deployment my-deployment
+
+
+
+
Create a Deployment
kubectl apply -f deployment.yaml
+
+
Create a new namespace called dev
kubectl create ns dev
+
+
Setting Namespace in Context
kubectl config set-context --current --namespace=dev
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/images/blue-green.png b/openshift/images/blue-green.png new file mode 100644 index 0000000..db4fef7 Binary files /dev/null and b/openshift/images/blue-green.png differ diff --git a/openshift/images/canary-release.png b/openshift/images/canary-release.png new file mode 100644 index 0000000..b2144f0 Binary files /dev/null and b/openshift/images/canary-release.png differ diff --git a/openshift/images/fixed-deploy.png b/openshift/images/fixed-deploy.png new file mode 100644 index 0000000..1ff2898 Binary files /dev/null and b/openshift/images/fixed-deploy.png differ diff --git a/openshift/images/rolling-deploy.png b/openshift/images/rolling-deploy.png new file mode 100644 index 0000000..c9be489 Binary files /dev/null and b/openshift/images/rolling-deploy.png differ diff --git a/openshift/index.html b/openshift/index.html new file mode 100644 index 0000000..e9ba01c --- /dev/null +++ b/openshift/index.html @@ -0,0 +1,2020 @@ + + + + + + + + + + + + + + + + + + + + + + + Kubernetes & OpenShift Overview - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Kubernetes & OpenShift Overview

+

Introduction

+

Kubernetes is an open source container orchestration platform that automates deployment, management and scaling of applications. Learn how Kubernetes enables cost-effective cloud native development.

+

What is Kubernetes?

+

Kubernetes—also known as ‘k8s’ or ‘kube’—is a container orchestration platform for scheduling and automating the deployment, management, and scaling of containerized applications.

+

Kubernetes was first developed by engineers at Google before being open sourced in 2014. +It is a descendant of ‘Borg,’ a container orchestration platform used internally at Google. (Kubernetes is Greek for helmsman or pilot, hence the helm in the Kubernetes logo.)

+

Today, Kubernetes and the broader container ecosystem are maturing into a general-purpose computing platform and ecosystem that rivals—if not surpasses—virtual machines (VMs) as the basic building blocks of modern cloud infrastructure and applications. +This ecosystem enables organizations to deliver a high-productivity Platform-as-a-Service (PaaS) that addresses multiple infrastructure- and operations-related tasks and issues surrounding cloud native development so that development teams can focus solely on coding and innovation.

+

https://www.ibm.com/cloud/learn/kubernetes

+ + +

Presentations

+

Kubernetes Overview

+

Predictable Demands Pattern

+

An application's performance, efficiency, and behaviors are reliant upon its ability to have the appropriate allocation of resources. The Predictable Demands pattern is based on declaring the dependencies and resources needed by a given application. The scheduler will prioritize an application with a defined set of resources and dependencies since it can better manage the workload across nodes in the cluster. Each application has a different set of dependencies which we will touch on next.

+

Runtime Dependencies

+

One of the most common runtime dependencies is the exposure of a container's specific port through hostPort. Different applications can specify the same port through hostPort, which reserves the port on each node in the cluster for the specific container. This declaration prevents multiple containers with the same hostPort from being deployed on the same node in the cluster and restricts the scale of pods to the number of nodes you have in the cluster.

+

Another runtime dependency is file storage for saving the application state. Kubernetes offers Pod-level storage utilities that are capable of surviving container restarts. Applications needing to read or write to these storage mechanisms will require nodes that provide the type of volume required by the application. If there are no nodes available with the required volume type, then the pod will not be scheduled to be deployed at all.

+

A different kind of dependency is configurations. ConfigMaps are used by Kubernetes to strategically plan out how to consume its settings through either environment variables or the filesystem. Secrets are consumed the same way as a ConfigMap in Kubernetes. Secrets are a more secure way to distribute environment-specific configurations to containers within the pod.

+

Resource Profiles

+

Resource Profiles are definitions for the compute resources required for a container. Resources are categorized in two ways: compressible and incompressible. Compressible resources include resources that can be throttled, such as CPU or network bandwidth. Incompressible represents resources that can't be throttled, such as memory, where there is no other way to release the allocated resource other than killing the container. The difference between compressible and incompressible is very important when it comes to planning the deployment of pods and containers since the resource allocation can be affected by the limits of each.

+

Every application needs to have a specified minimum and maximum amount of resources that are needed. The minimum amount is called "requests" and the maximum is the "limits". The scheduler uses the requests to determine the assignment of pods to nodes, ensuring that the node will have enough capacity to accommodate the pod and all of its containers' required resources. An example of defined resource limits is below:

+

Different levels of Quality of Service (QoS) are offered based on the specified requests and limits.

+
    +
  1. Quality of Service Levels +Best Effort;; + Lowest priority pod with no requests or limits set for its containers. These pods will be the first of any pods killed if resources run low. +Burstable;; + Limits and requests are defined but they are not equal. The pod will use the minimum amount of resources, but will consume more if needed up to the limit. If the needed resources become scarce then these pods will be killed if no Best Effort pods are left. +Guaranteed;; + Highest priority pods with an equal amount of requests and limits. These pods will be the last to be killed if resources run low and no Best Effort or Burstable pods are left.
  2. +
+

Pod Priority

+

The priority of pods can be defined through a PriorityClass object. The PriorityClass object allows developers to indicate the importance of a pod relative to the other pods in the cluster. The higher the priority number, the higher the priority of the pod. The scheduler looks at a pod's priorityClassName to populate the priority of new pods. As pods are being placed in the scheduling queue for deployment, the scheduler orders them from highest to lowest.

+

Another key feature for pod priority is the Preemption feature. The Preemption feature occurs when there are no nodes with enough capacity to place a pod. If this occurs the scheduler can preempt (remove) lower-priority Pods from nodes to free up resources and place Pods with higher priority. This effectively allows system administrators the ability to control which critical pods get top priority for resources in the cluster as well as controlling which critical workloads are able to be run on the cluster first. If a pod cannot be scheduled due to constraints, the scheduler will continue on with lower-priority pods.

+

Pod Priority should be used with caution, for this gives users control over the Kubernetes scheduler and the ability to place or kill pods, which may interrupt the cluster's critical functions. New pods with higher priority than others can quickly evict pods with lower priority that may be critical to a container's performance. ResourceQuota and PodDisruptionBudget are two tools that help prevent this from happening; read more here.

+

Declarative Deployment Pattern

+

With a growing number of microservices, reliance on an updating process for the services has become ever more important. Upgrading services is usually accompanied by some downtime for users or an increase in resource usage. Both of these can lead to an error affecting the performance of the application, making the release process a bottleneck.

+

A way to combat this issue in Kubernetes is through the use of Deployments. There are different approaches to the updating process that we will cover below. Any of these approaches can be put to use in order to save time for developers during their release cycles which can last from a few minutes to a few months.

+

Rolling Deployment

+

A Rolling Deployment ensures that there is no downtime during the update process. Kubernetes creates a new ReplicaSet for the new version of the service to be rolled out. From there Kubernetes creates a set of pods of the new version while leaving the old pods running. Once the new pods are all up and running they will replace the old pods and become the primary pods users access.

+

Rolling Deployment

+

The upside to this approach is that there is no downtime and the deployment is handled by kubernetes through a deployment like the one below. The downside is with two sets of pods running at one time there is a higher usage of resources that may lead to performance issues for users.

+

Fixed Deployment

+

A Fixed Deployment uses the Recreate strategy, which sets the maxUnavailable setting to the number of declared replicas. This in effect starts the new versions of the pods as the old versions are being killed. The starting and stopping of containers does create a little bit of downtime for customers while the starting and stopping is taking place, but the positive side is the users will only have to handle one version at a time.

+

Fixed Deployment

+

Blue-Green Release

+

A Blue-Green Release involves a manual process of creating a second deployment of pods with the newest version of the application running as well as keeping the old version of pods running in the cluster. Once the new pods are up and running properly the administrator shifts the traffic over to the new pods. Below is a diagram showing both versions up and running with the traffic going to the newer (green) pods.

+

Blue-Green

+

The downfall to this approach is the use of resources with two separate groups of pods running at the same time which could cause performance issues or complications. However, the advantage of this approach is users only experience one version at a time and it's easy to quickly switch back to the old version with no downtime if an issue arises with the newer version.

+

Canary Release

+

A Canary Release involves only standing up one pod of the new application code and shifting only a limited amount of new user traffic to that pod. This approach reduces the number of people exposed to the new service, allowing the administrator to see how the new version is performing. Once the team feels comfortable with the performance of the new service, then more pods can be stood up to replace the old pods. An advantage to this approach is no downtime with any of the services as the new service is being scaled.

+

Canary Release

+

Health Probe Pattern

+

The Health Probe pattern revolves around the health of applications being communicated to Kubernetes. To be fully automatable, cloud applications must be highly observable in order for Kubernetes to know which applications are up and ready to receive traffic and which cannot. Kubernetes can use that information for traffic direction, self-healing, and to achieve the desired state of the application.

+

Process Health Checks

+

The simplest health check in kubernetes is the Process Health Check. Kubernetes simply probes the application's processes to see if they are running or not. The process check tells kubernetes when a process for an application needs to be restarted or shut down in the case of a failure.

+

Liveness Probes

+

A Liveness Probe is performed by the Kubernetes Kubelet agent and asks the container to confirm its health. A simple process check can return that the container is healthy, but the container may not be performing correctly for users. The liveness probe addresses this issue by asking the container for its health from outside of the container itself. If a failure is found it may require that the container be restarted to get back to normal health. A liveness probe can perform the following actions to check health:

+
    +
  • HTTP GET and expects a success which is code 200-399.
  • +
  • A TCP Socket Probe and expects a successful connection.
  • +
  • An Exec Probe which executes a command and expects a successful exit code (0).
  • +
+

The action chosen to be performed for testing depends on the nature of the application and which action fits best. Always keep in mind that a failing health check results in a restart of the container from Kubernetes, so make sure the right health check is in place if the underlying issue can't be fixed.

+

Readiness Probes

+

A Readiness Probe is very similar to a Liveness probe, but the resulting action to a failed Readiness probe is different. When a liveness probe fails the container is restarted and, in some scenarios, a simple restart won't fix the issue, which is where a readiness probe comes in. A failed readiness probe won't restart the container but will disconnect it from the traffic endpoint. Removing a container from traffic allows it to get up and running smoothly before being tossed into service unready to handle requests from users. Readiness probes give an application time to catch up and make itself ready again to handle more traffic versus shutting down completely and simply creating a new pod. In most cases, liveness and readiness probes are run together on the same application to make sure that the container has time to get up and running properly as well as stays healthy enough to handle the traffic.

+

Managed Lifecycle Pattern

+

The Managed Lifecycle pattern describes how containers need to adapt their lifecycles based on the events that are communicated from a managing platform such as Kubernetes. Containers do not have control of their own lifecycles. It's the managing platforms that allow them to live or die, get traffic or have none, etc. This pattern covers how the different events can affect those lifecycle decisions.

+

SIGTERM

+

The SIGTERM is a signal that is sent from the managing platform to a container or pod that instructs the pod or container to shut down or restart. This signal can be sent due to a failed liveness test or a failure inside the container. SIGTERM allows the container to cleanly and properly shut itself down, versus SIGKILL, which we will get to next. Once received, the application will shut down as quickly as it can, allowing other processes to stop properly and cleaning up other files. Each application will have a different shutdown time based on the tasks needed to be done.

+

SIGKILL

+

SIGKILL is a signal sent to a container or pod forcing it to shut down. A SIGKILL is normally sent after the SIGTERM signal. There is a default 30 second grace period between the time that SIGTERM is sent to the application and SIGKILL is sent. The grace period can be adjusted for each pod using the .spec.terminationGracePeriodSeconds field. The overall goal for containerized applications should be to design and implement quick startup and shutdown operations.

+

postStart

+

The postStart hook is a command that is run after the creation of a container and begins asynchronously with the container's primary process. PostStart is put in place in order to give the container time to warm up and check itself during startup. During the postStart loop the container will be labeled in "pending" mode in Kubernetes while running through its initial processes. If the postStart function errors out it will do so with a nonzero exit code and the container process will be killed by Kubernetes. Careful planning must be done when deciding what logic goes into the postStart function because if it fails the container will also fail to start. Both postStart and preStop have two handler types that they run:

+
    +
  • +

    exec: Runs a command directly in the container.

    +
  • +
  • +

    httpGet: Executes an HTTP GET request against an opened port on the pod container.

    +
  • +
+

preStop

+

The preStop hook is a call that blocks a container from terminating too quickly and makes sure the container has a graceful shutdown. The preStop call must finish before the container is deleted by the container runtime. The preStop signal does not stop the container from being deleted completely, it is only an alternative to a SIGTERM signal for a graceful shutdown.

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/materials/03-Kubernetes-Basics.pdf b/openshift/materials/03-Kubernetes-Basics.pdf new file mode 100644 index 0000000..9aa1da4 Binary files /dev/null and b/openshift/materials/03-Kubernetes-Basics.pdf differ diff --git a/openshift/materials/03-Kubernetes-Basics.pptx.zip b/openshift/materials/03-Kubernetes-Basics.pptx.zip new file mode 100644 index 0000000..b4deaf5 Binary files /dev/null and b/openshift/materials/03-Kubernetes-Basics.pptx.zip differ diff --git a/openshift/operators/operators/index.html b/openshift/operators/operators/index.html new file mode 100644 index 0000000..7653dc3 --- /dev/null +++ b/openshift/operators/operators/index.html @@ -0,0 +1,1725 @@ + + + + + + + + + + + + + + + + + + + + + + + Operators - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Operators

+

Operators in Openshift are...

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/pods/health-checks/index.html b/openshift/pods/health-checks/index.html new file mode 100644 index 0000000..e9fbbfd --- /dev/null +++ b/openshift/pods/health-checks/index.html @@ -0,0 +1,2173 @@ + + + + + + + + + + + + + + + + + + + + + + + Health and Monitoring - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Health and Monitoring

+

Liveness and Readiness Probes

+

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. There are three types of handlers:

+

ExecAction: Executes a specified command inside the Container. The diagnostic is considered successful if the command exits with a status code of 0.

+

TCPSocketAction: Performs a TCP check against the Container’s IP address on a specified port. The diagnostic is considered successful if the port is open.

+

HTTPGetAction: Performs an HTTP Get request against the Container’s IP address on a specified port and path. The diagnostic is considered successful if the response has a status code greater than or equal to 200 and less than 400.

+

The kubelet can optionally perform and react to three kinds of probes on running Containers:

+

livenessProbe: Indicates whether the Container is running. Runs for the lifetime of the Container.

+

readinessProbe: Indicates whether the Container is ready to service requests. Runs periodically for the lifetime of the Container; on failure the Pod is removed from Service endpoints rather than restarted.

+

Resources

+ +

References

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  containers:
+  - name: app
+    image: busybox
+    command: ['sh', '-c', "echo Hello, Kubernetes! && sleep 3600"]
+    livenessProbe:
+      exec:
+        command: ['echo','alive']
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  shareProcessNamespace: true
+  containers:
+  - name: app
+    image: bitnami/nginx
+    ports:
+    - containerPort: 8080
+    livenessProbe:
+      tcpSocket:
+        port: 8080
+      initialDelaySeconds: 10
+    readinessProbe:
+      httpGet:
+        path: /
+        port: 8080
+      periodSeconds: 10
+
+

Container Logging

+

Application and systems logs can help you understand what is happening inside your cluster. The logs are particularly useful for debugging problems and monitoring cluster activity.

+

Kubernetes provides no native storage solution for log data, but you can integrate many existing logging solutions into your Kubernetes cluster.

+

Resources

+

OpenShift

+ +

IKS

+ +

References

+
Pod Example
apiVersion: v1
+kind: Pod
+metadata:
+  name: counter
+spec:
+  containers:
+  - name: count
+    image: busybox
+    command: ['sh','-c','i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 5; done']
+
+
+
+
+
Get Logs
oc logs
+
+
Use Stern to View Logs
brew install stern
+stern . -n default
+
+
+
+
Get Logs
kubectl logs
+
+
Use Stern to View Logs
brew install stern
+stern . -n default
+
+
+
+
+

Monitoring Applications

+

To scale an application and provide a reliable service, you need to understand how the application behaves when it is deployed. You can examine application performance in a Kubernetes cluster by examining the containers, pods, services, and the characteristics of the overall cluster. Kubernetes provides detailed information about an application’s resource usage at each of these levels. This information allows you to evaluate your application’s performance and where bottlenecks can be removed to improve overall performance.

+

Prometheus, a CNCF project, can natively monitor Kubernetes, nodes, and Prometheus itself.

+

Resources

+

OpenShift

+ +

IKS

+ +

References

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: 500m
+spec:
+  containers:
+  - name: app
+    image: gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4
+    resources:
+      requests:
+        cpu: 700m
+        memory: 128Mi
+  - name: busybox-sidecar
+    image: radial/busyboxplus:curl
+    command: [/bin/sh, -c, 'until curl localhost:8080/ConsumeCPU -d "millicores=500&durationSec=3600"; do sleep 5; done && sleep 3700']
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: 200m
+spec:
+  containers:
+  - name: app
+    image: gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4
+    resources:
+      requests:
+        cpu: 300m
+        memory: 64Mi
+  - name: busybox-sidecar
+    image: radial/busyboxplus:curl
+    command: [/bin/sh, -c, 'until curl localhost:8080/ConsumeCPU -d "millicores=200&durationSec=3600"; do sleep 5; done && sleep 3700']
+
+
+
+
+
oc get projects
+oc api-resources -o wide
+oc api-resources -o name
+
+oc get nodes,ns,po,deploy,svc
+
+oc describe node --all
+
+
+
+

Verify Metrics is enabled +

kubectl get --raw /apis/metrics.k8s.io/
+

+

Get Node Description +

kubectl describe node
+

+

Check Resource Usage +

kubectl top pods
+kubectl top nodes
+

+
+
+
+

+

+

Activities

+ + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
Try It Yourself
ProbesCreate some Health & Startup Probes to find what's causing an issue.Probes
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/pods/index.html b/openshift/pods/index.html new file mode 100644 index 0000000..05d4752 --- /dev/null +++ b/openshift/pods/index.html @@ -0,0 +1,1845 @@ + + + + + + + + + + + + + + + + + + + + + + + Pods - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Pods

+

A Pod is the basic execution unit of a Kubernetes application–the smallest and simplest unit in the Kubernetes object model that you create or deploy. A Pod represents processes running on your Cluster.

+

A Pod encapsulates an application’s container (or, in some cases, multiple containers), storage resources, a unique network IP, and options that govern how the container(s) should run. A Pod represents a unit of deployment: a single instance of an application in Kubernetes, which might consist of either a single container or a small number of containers that are tightly coupled and that share resources.

+

Resources

+ +

References

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: myapp-pod
+  labels:
+    app: myapp
+spec:
+  containers:
+    - name: myapp-container
+      image: busybox
+      command: ["sh", "-c", "echo Hello Kubernetes! && sleep 3600"]
+
+
+
+
+

Create Pod using yaml file

+
oc apply -f pod.yaml
+
+

Get Current Pods in Project

+
oc get pods
+
+

Get Pods with their IP and node location

+
oc get pods -o wide
+
+

Get Pod's Description

+
oc describe pod myapp-pod
+
+

Get the logs

+
oc logs myapp-pod
+
+

Delete a Pod

+
oc delete pod myapp-pod
+
+
+
+

Create Pod using yaml file

+
kubectl apply -f pod.yaml
+
+

Get Current Pods in Project

+
kubectl get pods
+
+

Get Pods with their IP and node location

+
kubectl get pods -o wide
+
+

Get Pod's Description

+
kubectl describe pod myapp-pod
+
+

Get the logs

+
kubectl logs myapp-pod
+
+

Delete a Pod

+
kubectl delete pod myapp-pod
+
+
+
+
+

Activities

+ + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
*** Try It Yourself ***
Creating PodsCreate a Pod YAML file to meet certain parametersPod Creation
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/pods/jobs/index.html b/openshift/pods/jobs/index.html new file mode 100644 index 0000000..fc3cf08 --- /dev/null +++ b/openshift/pods/jobs/index.html @@ -0,0 +1,1964 @@ + + + + + + + + + + + + + + + + + + + + + + + Jobs and CronJobs - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Jobs and CronJobs

+

Jobs +A Job creates one or more Pods and ensures that a specified number of them successfully terminate. As pods successfully complete, the Job tracks the successful completions. When a specified number of successful completions is reached, the task (ie, Job) is complete. Deleting a Job will clean up the Pods it created.

+

CronJobs +One CronJob object is like one line of a crontab (cron table) file. It runs a job periodically on a given schedule, written in Cron format.

+

All CronJob schedule: times are based on the timezone of the master where the job is initiated.

+

Resources

+

OpenShift +- Jobs +- CronJobs

+

IKS +- Jobs to Completion +- Cron Jobs +- Automated Tasks with Cron

+

References

+

It computes π to 2000 places and prints it out +

apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pi
+spec:
+  template:
+    spec:
+      containers:
+      - name: pi
+        image: perl
+        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
+      restartPolicy: Never
+  backoffLimit: 4
+

+

Running in parallel +

apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pi
+spec:
+  parallelism: 2
+  completions: 3
+  template:
+    spec:
+      containers:
+        - name: pi
+          image: perl
+          command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
+      restartPolicy: Never
+  backoffLimit: 4
+

+
apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+  name: hello
+spec:
+  schedule: "*/1 * * * *"
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          containers:
+          - name: hello
+            image: busybox
+            args:
+            - /bin/sh
+            - -c
+            - date; echo Hello from the Kubernetes cluster
+          restartPolicy: OnFailure
+
+
+
+
+

Gets Jobs +

oc get jobs
+
+Gets Job Description +
oc describe job pi
+
+Gets Pods from the Job +
oc get pods
+
+Deletes Job +
oc delete job pi
+
+Gets CronJob +
oc get cronjobs
+
+Describes CronJob +
oc describe cronjobs pi
+
+Gets Pods from CronJob +
oc get pods
+
+Deletes CronJob +
oc delete cronjobs pi
+

+
+
+

Gets Jobs +

kubectl get jobs
+
+Gets Job Description +
kubectl describe job pi
+
+Gets Pods from the Job +
kubectl get pods
+
+Deletes Job +
kubectl delete job pi
+
+Gets CronJob +
kubectl get cronjobs
+
+Describes CronJob +
kubectl describe cronjobs pi
+
+Gets Pods from CronJob +
kubectl get pods
+
+Deletes CronJob +
kubectl delete cronjobs pi
+

+
+
+
+

Activities

+ + + + + + + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
Try It Yourself
Rolling Updates LabCreate a Rolling Update for your application.Rolling Updates
Cron Jobs LabUsing Tekton to test new versions of applications.Crons Jobs
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/pods/multi-container/index.html b/openshift/pods/multi-container/index.html new file mode 100644 index 0000000..9edb786 --- /dev/null +++ b/openshift/pods/multi-container/index.html @@ -0,0 +1,1920 @@ + + + + + + + + + + + + + + + + + + + + + + + Multi-Container - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Multi-Containers Pod

+

Container images solve many real-world problems with existing packaging and deployment tools, but in addition to these significant benefits, containers offer us an opportunity to fundamentally re-think the way we build distributed applications. Just as service oriented architectures (SOA) encouraged the decomposition of applications into modular, focused services, containers should encourage the further decomposition of these services into closely cooperating modular containers. By virtue of establishing a boundary, containers enable users to build their services using modular, reusable components, and this in turn leads to services that are more reliable, more scalable and faster to build than applications built from monolithic containers.

+

Resources

+ +

References

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  volumes:
+  - name: shared-data
+    emptyDir: {}
+  containers:
+  - name: app
+    image: bitnami/nginx
+    volumeMounts:
+      - name: shared-data
+        mountPath: /app
+    ports:
+    - containerPort: 8080
+  - name: sidecard
+    image: busybox
+    volumeMounts:
+    - name: shared-data
+      mountPath: /pod-data
+    command: ['sh', '-c', 'echo Hello from the side container > /pod-data/index.html && sleep 3600']
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  shareProcessNamespace: true
+  containers:
+  - name: app
+    image: bitnami/nginx
+    ports:
+    - containerPort: 8080
+  - name: sidecard
+    image: busybox
+    securityContext:
+      capabilities:
+        add:
+        - SYS_PTRACE
+    stdin: true
+    tty: true
+
+
+
+
+

Attach Pods Together +

oc attach -it my-pod -c sidecard
+
+
ps ax
+
+
kill -HUP 7
+
+
ps ax
+

+
+
+

Attach Pods Together +

kubectl attach -it my-pod -c sidecard
+
+
ps ax
+
+
kill -HUP 7
+
+
ps ax
+

+
+
+
+

Activities

+ + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
Try It Yourself
Multiple ContainersBuild a container using legacy container image.Multiple Containers
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/pods/tagging/index.html b/openshift/pods/tagging/index.html new file mode 100644 index 0000000..50bd92e --- /dev/null +++ b/openshift/pods/tagging/index.html @@ -0,0 +1,1884 @@ + + + + + + + + + + + + + + + + + + + + + + + Labels-Selectors - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Labels, Selectors, and Annotations

+

Labels are key/value pairs that are attached to objects, such as pods. Labels are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system. Labels can be used to organize and to select subsets of objects. Labels can be attached to objects at creation time and subsequently added and modified at any time. Each object can have a set of key/value labels defined. Each Key must be unique for a given object.

+

You can use Kubernetes annotations to attach arbitrary non-identifying metadata to objects. Clients such as tools and libraries can retrieve this metadata.

+

You can use either labels or annotations to attach metadata to Kubernetes objects. Labels can be used to select objects and to find collections of objects that satisfy certain conditions. In contrast, annotations are not used to identify and select objects. The metadata in an annotation can be small or large, structured or unstructured, and can include characters not permitted by labels.

+

Resources

+
+ +
+

References

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+  labels:
+    app: foo
+    tier: frontend
+    env: dev
+  annotations:
+    imageregistry: "https://hub.docker.com/"
+    gitrepo: "https://github.com/csantanapr/knative"
+spec:
+  containers:
+  - name: app
+    image: bitnami/nginx
+
+
+
+
+

Change Labels on Objects +

oc label pod my-pod boot=camp
+
+Getting Pods based on their labels. +
oc get pods --show-labels
+
+
oc get pods -L tier,env
+
+
oc get pods -l app
+
+
oc get pods -l tier=frontend
+
+
oc get pods -l 'env=dev,tier=frontend'
+
+
oc get pods -l 'env in (dev, test)'
+
+
oc get pods -l 'tier!=backend'
+
+
oc get pods -l 'env,env notin (prod)'
+
+Delete the Pod. +
oc delete pod my-pod
+

+
+
+

Change Labels on Objects +

kubectl label pod my-pod boot=camp
+
+Getting Pods based on their labels. +
kubectl get pods --show-labels
+
+
kubectl get pods -L tier,env
+
+
kubectl get pods -l app
+
+
kubectl get pods -l tier=frontend
+
+
kubectl get pods -l 'env=dev,tier=frontend'
+
+
kubectl get pods -l 'env in (dev, test)'
+
+
kubectl get pods -l 'tier!=backend'
+
+
kubectl get pods -l 'env,env notin (prod)'
+
+Delete the Pod. +
kubectl delete pod my-pod
+

+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/pods/troubleshooting/index.html b/openshift/pods/troubleshooting/index.html new file mode 100644 index 0000000..16da473 --- /dev/null +++ b/openshift/pods/troubleshooting/index.html @@ -0,0 +1,1935 @@ + + + + + + + + + + + + + + + + + + + + + + + Troubleshooting - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Debugging Applications

+

Kubernetes provides tools to help troubleshoot and debug problems with applications.

+

Usually is getting familiar with how primitives objects interact with each other, checking the status of objects, and finally checking logs for any last resource clues.

+

Resources

+ +

References

+
+
+
+

** MacOS/Linux/Windows command: ** +

oc apply -f https://gist.githubusercontent.com/csantanapr/e823b1bfab24186a26ae4f9ec1ff6091/raw/1e2a0cca964c7b54ce3df2fc3fbf33a232511877/debugk8s-bad.yaml
+

+

** Expose the service using port-forward ** +

oc port-forward service/my-service 8080:80 -n debug
+
+** Try to access the service ** +
curl http://localhost:8080
+

+

** Try Out these Commands to Debug ** +

oc get pods --all-namespaces
+
+
oc project debug
+
+
oc get deployments
+
+
oc describe pod
+
+
oc explain Pod.spec.containers.resources.requests
+
+
oc explain Pod.spec.containers.livenessProbe
+
+
oc edit deployment
+
+
oc logs
+
+
oc get service
+
+
oc get ep
+
+
oc describe service
+
+
oc get pods --show-labels
+
+
oc get deployment --show-labels
+

+
+
+

** MacOS/Linux/Windows command: ** +

kubectl apply -f https://gist.githubusercontent.com/csantanapr/e823b1bfab24186a26ae4f9ec1ff6091/raw/1e2a0cca964c7b54ce3df2fc3fbf33a232511877/debugk8s-bad.yaml
+

+

** Expose the service using port-forward ** +

kubectl port-forward service/my-service 8080:80 -n debug
+
+** Try to access the service ** +
curl http://localhost:8080
+

+

** Try Out these Commands to Debug ** +

kubectl get pods --all-namespaces
+
+
kubectl config set-context --current --namespace=debug
+
+
kubectl get deployments
+
+
kubectl describe pod
+
+
kubectl explain Pod.spec.containers.resources.requests
+
+
kubectl explain Pod.spec.containers.livenessProbe
+
+
kubectl edit deployment
+
+
kubectl logs
+
+
kubectl get service
+
+
kubectl get ep
+
+
kubectl describe service
+
+
kubectl get pods --show-labels
+
+
kubectl get deployment --show-labels
+

+
+
+
+

Activities

+

The continuous integration activities focus around Tekton, the integration platform. These labs will show you how to build pipelines and test your code before deployment.

+ + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
*** Try It Yourself ***
DebuggingFind which service is breaking in your cluster and find out why.Debugging
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/services-networking/index.html b/openshift/services-networking/index.html new file mode 100644 index 0000000..9caf184 --- /dev/null +++ b/openshift/services-networking/index.html @@ -0,0 +1,1873 @@ + + + + + + + + + + + + + + + + + + + + + + + Services - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Services

+

An abstract way to expose an application running on a set of Pods as a network service.

+

Kubernetes Pods are mortal. They are born and when they die, they are not resurrected. If you use a Deployment to run your app, it can create and destroy Pods dynamically.

+

Each Pod gets its own IP address, however in a Deployment, the set of Pods running in one moment in time could be different from the set of Pods running that application a moment later.

+

In Kubernetes, a Service is an abstraction which defines a logical set of Pods and a policy by which to access them (sometimes this pattern is called a micro-service). The set of Pods targeted by a Service is usually determined by a selector (see below for why you might want a Service without a selector).

+

If you’re able to use Kubernetes APIs for service discovery in your application, you can query the API server for Endpoints, that get updated whenever the set of Pods in a Service changes.

+

For non-native applications, Kubernetes offers ways to place a network port or load balancer in between your application and the backend Pods.

+

Resources

+
+ +
+

References

+
apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-deployment
+  labels:
+    app: nginx
+    version: v1
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+        version: v1
+    spec:
+      containers:
+      - name: nginx
+        image: bitnami/nginx
+        ports:
+        - containerPort: 8080
+          name: http
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+spec:
+  selector:
+    app: nginx
+  ports:
+    - name: http
+      port: 80
+      targetPort: http
+
+
+
+
+
Get Logs
oc logs
+
+
Use Stern to View Logs
brew install stern
+stern . -n default
+
+
+
+
Get Logs
kubectl logs
+
+
Use Stern to View Logs
brew install stern
+stern . -n default
+
+
+
+
+

+
+
+
+
+
Get Service
oc get svc
+
+
Get Service Description
oc describe svc my-service
+
+
Expose a Service
oc expose service <service_name>
+
+
Get Route for the Service
oc get route
+
+
+
+
Get Service
kubectl get svc
+
+
Get Service Description
kubectl describe svc my-service
+
+
Get Service Endpoints
kubectl get ep my-service
+
+
Expose a Deployment via a Service
kubectl expose deployment my-deployment --port 80 --target-port=http --selector app=nginx --name my-service-2 --type NodePort
+
+
+
+
+

Activities

+ + + + + + + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
*** Try It Yourself ***
Creating ServicesCreate two services with certain requirements.Setting up Services
IKS Ingress ControllerConfigure Ingress on Free IKS ClusterSetting IKS Ingress
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/services-networking/ingress/index.html b/openshift/services-networking/ingress/index.html new file mode 100644 index 0000000..091be99 --- /dev/null +++ b/openshift/services-networking/ingress/index.html @@ -0,0 +1,1896 @@ + + + + + + + + + + + + + + + + + + + + + + + Ingresses - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Ingress

+

An API object that manages external access to the services in a cluster, typically HTTP.

+

Ingress can provide load balancing, SSL termination and name-based virtual hosting.

+

Ingress exposes HTTP and HTTPS routes from outside the cluster to services within the cluster. Traffic routing is controlled by rules defined on the Ingress resource.

+

Resources

+ +

References

+
apiVersion: networking.k8s.io/v1beta1 # for versions before 1.14 use extensions/v1beta1
+kind: Ingress
+metadata:
+  name: example-ingress
+spec:
+  rules:
+  - host: hello-world.info
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: web
+          servicePort: 8080
+
+
+
+
+
View Ingress Status
oc describe clusteroperators/ingress
+
+
Describe default Ingress Controller
oc describe --namespace=openshift-ingress-operator ingresscontroller/default
+
+
+
+

Describe default Ingress Controller
kubectl get pods -n kube-system | grep ingress
+
+
kubectl create deployment web --image=bitnami/nginx
+
+
kubectl expose deployment web --name=web --port 8080
+
+
kubectl get svc web
+
+
kubectl get ingress
+
+
kubectl describe ingress example-ingress
+
+
curl hello-world.info --resolve hello-world.info:80:<ADDRESS>
+

+
+
+
+

Activities

+ + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
*** Try It Yourself ***
IKS Ingress ControllerConfigure Ingress on Free IKS ClusterSetting IKS Ingress
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/services-networking/routes/index.html b/openshift/services-networking/routes/index.html new file mode 100644 index 0000000..7442472 --- /dev/null +++ b/openshift/services-networking/routes/index.html @@ -0,0 +1,1868 @@ + + + + + + + + + + + + + + + + + + + + + + + Routes - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Routes

+

(OpenShift Only)

+

Routes are OpenShift objects that expose services for external clients to reach them by name.

+

Routes can be unsecured or secured on creation using certificates.

+

The new route inherits the name from the service unless you specify one using the --name option.

+

Resources

+ +

References

+

** Route Creation ** +

apiVersion: v1
+kind: Route
+metadata:
+  name: frontend
+spec:
+  to:
+    kind: Service
+    name: frontend
+
+** Secured Route Creation ** +
apiVersion: v1
+kind: Route
+metadata:
+  name: frontend
+spec:
+  to:
+    kind: Service
+    name: frontend
+  tls:
+    termination: edge
+

+

Commands

+
+
+
+
Create Route from YAML
oc apply -f route.yaml
+
+
Get Route
oc get route
+
+
Describe Route
oc get route <route_name>
+
+
Get Route YAML
oc get route <route_name> -o yaml
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/services-networking/services/index.html b/openshift/services-networking/services/index.html new file mode 100644 index 0000000..4a8850d --- /dev/null +++ b/openshift/services-networking/services/index.html @@ -0,0 +1,1935 @@ + + + + + + + + + + + + + + + + + + + + + + + Services - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Services

+

An abstract way to expose an application running on a set of Pods as a network service.

+

Kubernetes Pods are mortal. They are born and when they die, they are not resurrected. If you use a Deployment to run your app, it can create and destroy Pods dynamically.

+

Each Pod gets its own IP address, however in a Deployment, the set of Pods running in one moment in time could be different from the set of Pods running that application a moment later.

+

In Kubernetes, a Service is an abstraction which defines a logical set of Pods and a policy by which to access them (sometimes this pattern is called a micro-service). The set of Pods targeted by a Service is usually determined by a selector (see below for why you might want a Service without a selector).

+

If you’re able to use Kubernetes APIs for service discovery in your application, you can query the API server for Endpoints, that get updated whenever the set of Pods in a Service changes.

+

For non-native applications, Kubernetes offers ways to place a network port or load balancer in between your application and the backend Pods.

+

Resources

+
+ +
+

References

+
apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-deployment
+  labels:
+    app: nginx
+    version: v1
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+        version: v1
+    spec:
+      containers:
+      - name: nginx
+        image: bitnami/nginx
+        ports:
+        - containerPort: 8080
+          name: http
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+spec:
+  selector:
+    app: nginx
+  ports:
+    - name: http
+      port: 80
+      targetPort: http
+
+
+
+
+
Get Logs
oc logs
+
+
Use Stern to View Logs
brew install stern
+stern . -n default
+
+
+
+
Get Logs
kubectl logs
+
+
Use Stern to View Logs
brew install stern
+stern . -n default
+
+
+
+
+

+
+
+
+
+
Get Service
oc get svc
+
+
Get Service Description
oc describe svc my-service
+
+
Expose a Service
oc expose service <service_name>
+
+
Get Route for the Service
oc get route
+
+
+
+
Get Service
kubectl get svc
+
+
Get Service Description
kubectl describe svc my-service
+
+
Get Service Endpoints
kubectl get ep my-service
+
+
Expose a Deployment via a Service
kubectl expose deployment my-deployment --port 80 --target-port=http --selector app=nginx --name my-service-2 --type NodePort
+
+
+
+
+

Activities

+ + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
*** Try It Yourself ***
Creating ServicesCreate two services with certain requirements.Setting up Services
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/state-persistence/index.html b/openshift/state-persistence/index.html new file mode 100644 index 0000000..ed21835 --- /dev/null +++ b/openshift/state-persistence/index.html @@ -0,0 +1,1791 @@ + + + + + + + + + + + + + + + + + + + + + + + State Persistence - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

State Persistence

+

State persistence in the context of Kubernetes/OpenShift refers to the ability to maintain and retain the state or data of applications even when they are stopped, restarted, or moved between nodes. This is achieved through the use of volumes, persistent volumes (PVs), and persistent volume claims (PVCs). Volumes provide a way to store and access data in a container, while PVs serve as the underlying storage resources provisioned by the cluster. PVCs act as requests made by applications for specific storage resources from the available PVs. By utilizing PVs and PVCs, applications can ensure that their state is preserved and accessible across pod restarts and migrations, enabling reliable and consistent data storage and retrieval throughout the cluster.

+

Resources

+

Volumes

+

Persistent Volumes

+

Persistent Volume Claims

+

References

+

apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: my-pvc
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+
+In this example, we define a PVC named my-pvc with the following specifications:

+

accessModes specify that the volume can be mounted as read-write by a single node at a time (ReadWriteOnce). +resources.requests.storage specifies the requested storage size for the PVC (1Gi).

+

Activities

+ + + + + + + + + + + + + + + + + + + + +
TaskDescriptionLink
*** Try It Yourself ***
Setting up Persistent VolumesCreate a Persistent Volume that's accessible from a SQL Pod.Setting up Persistent Volumes
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/state-persistence/pv-pvc/index.html b/openshift/state-persistence/pv-pvc/index.html new file mode 100644 index 0000000..cd64f88 --- /dev/null +++ b/openshift/state-persistence/pv-pvc/index.html @@ -0,0 +1,1880 @@ + + + + + + + + + + + + + + + + + + + + + + + Persistent Volumes & Claims - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

PersistentVolumes and Claims

+

Managing storage is a distinct problem from managing compute instances. The PersistentVolume subsystem provides an API for users and administrators that abstracts details of how storage is provided from how it is consumed.

+

A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes.

+

A PersistentVolumeClaim (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources. Claims can request specific size and access modes (e.g., they can be mounted once read/write or many times read-only).

+

While PersistentVolumeClaims allow a user to consume abstract storage resources, it is common that users need PersistentVolumes with varying properties, such as performance, for different problems. Cluster administrators need to be able to offer a variety of PersistentVolumes that differ in more ways than just size and access modes, without exposing users to the details of how those volumes are implemented. For these needs, there is the StorageClass resource.

+

Pods access storage by using the claim as a volume. Claims must exist in the same namespace as the Pod using the claim. The cluster finds the claim in the Pod’s namespace and uses it to get the PersistentVolume backing the claim. The volume is then mounted to the host and into the Pod.

+

PersistentVolumes binds are exclusive, and since PersistentVolumeClaims are namespaced objects, mounting claims with “Many” modes (ROX, RWX) is only possible within one namespace.

+

Resources

+ +

References

+
kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: my-pv
+spec:
+  storageClassName: local-storage
+  capacity:
+    storage: 128Mi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: "/mnt/data-1"
+
+
apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: my-pvc
+spec:
+  storageClassName: local-storage
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 100Mi
+
+
kind: Pod
+apiVersion: v1
+metadata:
+  name: my-pod
+spec:
+  containers:
+  - name: nginx
+    image: busybox
+    command: ['sh', '-c', 'echo $(date):$HOSTNAME Hello Kubernetes! >> /mnt/data/message.txt && sleep 3600']
+    volumeMounts:
+    - mountPath: "/mnt/data"
+      name: my-data
+  volumes:
+  - name: my-data
+    persistentVolumeClaim:
+      claimName: my-pvc
+
+
+
+
+
Get the Persistent Volumes in Project
oc get pv
+
+
Get the Persistent Volume Claims
oc get pvc
+
+
Get a specific Persistent Volume
oc get pv <pv_claim>
+
+
+
+
Get the Persistent Volume
kubectl get pv
+
+
Get the Persistent Volume Claims
kubectl get pvc
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/openshift/state-persistence/volumes/index.html b/openshift/state-persistence/volumes/index.html new file mode 100644 index 0000000..37bb543 --- /dev/null +++ b/openshift/state-persistence/volumes/index.html @@ -0,0 +1,1848 @@ + + + + + + + + + + + + + + + + + + + + + + + Volumes - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Volumes

+

On-disk files in a Container are ephemeral, which presents some problems for non-trivial applications when running in Containers. First, when a Container crashes, kubelet will restart it, but the files will be lost - the Container starts with a clean state. Second, when running Containers together in a Pod it is often necessary to share files between those Containers. The Kubernetes Volume abstraction solves both of these problems.

+

Docker also has a concept of volumes, though it is somewhat looser and less managed. In Docker, a volume is simply a directory on disk or in another Container.

+

A Kubernetes volume, on the other hand, has an explicit lifetime - the same as the Pod that encloses it. Consequently, a volume outlives any Containers that run within the Pod, and data is preserved across Container restarts. Of course, when a Pod ceases to exist, the volume will cease to exist, too. Perhaps more importantly than this, Kubernetes supports many types of volumes, and a Pod can use any number of them simultaneously.

+

Resources

+
+ +
+

References

+
apiVersion: v1
+kind: Pod
+metadata:
+  name: my-pod
+spec:
+  containers:
+  - image: busybox
+    command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600']
+    name: busybox
+    volumeMounts:
+    - mountPath: /cache
+      name: cache-volume
+  volumes:
+  - name: cache-volume
+    emptyDir: {}
+
+
apiVersion: v1
+kind: Pod
+metadata:
+  name: test-pd
+spec:
+  containers:
+  - image: bitnami/nginx
+    name: test-container
+    volumeMounts:
+    - mountPath: /test-pd
+      name: test-volume
+  volumes:
+  - name: test-volume
+    hostPath:
+      # directory location on host
+      path: /data
+      # this field is optional
+      type: Directory
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/prerequisites/index.html b/prerequisites/index.html new file mode 100644 index 0000000..e60d9bd --- /dev/null +++ b/prerequisites/index.html @@ -0,0 +1,1666 @@ + + + + + + + + + + + + + + + + + + + + + + + Prerequisites - Cloud Native Bootcamp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Prerequisites

+

Required skills

+

The activities contained here require you to be proficient in working from the command line with a Linux shell (Bash, Zsh, etc.). Below is a partial list of activities you should be able to perform.

+
    +
  • Copy, move, and rename files
  • +
  • Understand linux file permissions
  • +
  • Edit text files (vi, vim, emacs, etc)
  • +
  • Edit environment variables ($PATH)
  • +
+

Here is a course for learning (or brushing up) on working from the linux command line Linux Command Line Basics

+

Workstation Setup

+
+
+
+

Create accounts

+

You'll need these accounts to use the Developer Tools environment.

+
    +
  • +

    GitHub account (public, not enterprise): Create one if you do not have one already. If you have not logged in for a while, make sure your login is working.

    +
  • +
  • +

    IBM Cloud Account: Create one if needed, make sure you can log in.

    +
  • +
  • +

    O'Reilly Account: The account is free and easy to create.

    +
  • +
  • +

    RedHat Account: Needed for CodeReady Containers.

    +
  • +
+

Run System Check Script

+

Run the following command in your terminal to check which tools need to be installed.

+

Using wget:

+
wget -O - https://cloudbootcamp.dev/scripts/system-check.sh | sh
+
+

Using curl:

+
curl -s https://cloudbootcamp.dev/scripts/system-check.sh | sh
+
+

After the script is run, make sure to install any missing tools.

+

Install CLIs and tools

+

The following is a list of desktop tools required to help with installation and development.

+
    +
  • +

    Git Client: Needs to be installed in your development operating system, it comes as standard for Mac OS

    +
  • +
  • +

    IBM Cloud CLI: Required for management of IBM Cloud Account and management of your managed IBM Kubernetes and Red Hat OpenShift clusters

    + +
  • +
+
+

Note

+

If you log in to the web UI using SSO, you'll need to create an API key for logging into the CLI.

+
+
    +
  • +

    Podman Desktop: Required for building and running container images.

    +
      +
    • Installed and running on your local machine
    • +
    +
  • +
  • +

    Tekton CLI: Used to help control Tekton +pipelines from the command line. +

        brew tap tektoncd/tools
    +    brew install tektoncd/tools/tektoncd-cli
    +

    +
  • +
  • +

    Visual Studio Code: A popular code editor

    + +
  • +
  • +

    JDK 11: Optional installed on your local machine

    +
      +
    • Used for SpringBoot content
    • +
    +
  • +
+
+
+

Create accounts

+

You'll need these accounts to use the Developer Tools environment.

+
    +
  • +

    GitHub account (public, not enterprise): Create one if you do not have one already. If you have not logged in for a while, make sure your login is working.

    +
  • +
  • +

    IBM Cloud Account: Create one if needed, make sure you can log in.

    +
  • +
  • +

    O'Reilly Account: The account is free and easy to create.

    +
  • +
  • +

    RedHat Account: Needed for CodeReady Containers.

    +
  • +
+

Cloud Native VM

+

Use the Cloud Native VM it comes pre-installed with kubernetes and all cloud native CLIs.

+

It is highly recommended for Windows users to use this VM.

+

Install CLIs and tools

+

The following is a list of desktop tools required to help with installation and development.

+
    +
  • +

    Git Client: Needs to be installed in your development operating system, it comes as standard for Mac OS

    +
  • +
  • +

    IBM Cloud CLI: Required for management of IBM Cloud Account and management of your managed IBM Kubernetes and Red Hat OpenShift clusters

    + +
  • +
+
+

Note

+

If you log in to the web UI using SSO, you'll need to create an API key for logging into the CLI.

+
+ +

+

Warning: Make sure you have Cisco VPN turned off when using CRC.

+

+
+
+

Create accounts

+

You'll need these accounts to use the Developer Tools environment.

+
    +
  • +

    GitHub account (public, not enterprise): Create one if you do not have one already. If you have not logged in for a while, make sure your login is working.

    +
  • +
  • +

    IBM Cloud Account: Create one if needed, make sure you can log in.

    +
  • +
  • +

    O'Reilly Account: The account is free and easy to create.

    +
  • +
+

Run System Check Script

+

Run the following command in your terminal to check which tools need to be installed.

+

Using wget: +

wget -O - https://cloudbootcamp.dev/scripts/system-check.sh | sh
+

+

Using curl: +

curl -s https://cloudbootcamp.dev/scripts/system-check.sh | sh
+

+

After the script is run, make sure to install any missing tools.

+

Install CLIs and tools

+

The following is a list of desktop tools required to help with installation and development.

+
    +
  • +

    Git Client: Needs to be installed in your development operating system, it comes as standard for Mac OS

    +
  • +
  • +

    IBM Cloud CLI: Required for management of IBM Cloud Account and management of your managed IBM Kubernetes and Red Hat OpenShift clusters

    + +
  • +
+

!!! Note + If you log in to the web UI using SSO, you'll need to create an API key for logging into the CLI.

+
    +
  • +

    Podman Desktop: Required for building and running container images.

    +
      +
    • Installed and running on your local machine
    • +
    +
  • +
  • +

    Tekton CLI: Used to help control Tekton +pipelines from the command line. +

        brew tap tektoncd/tools
    +    brew install tektoncd/tools/tektoncd-cli
    +

    +
  • +
  • +

    Visual Studio Code: A popular code editor

    + +
  • +
  • +

    JDK 11: Optional installed on your local machine

    +
      +
    • Used for SpringBoot content
    • +
    +
  • +
  • +

    Minikube: Follow the instructions for your Operating System.

    +
  • +
+

+

Warning: Make sure you have Cisco VPN turned off when using minikube.

+

+
+
+

Create accounts

+

You'll need these accounts to use the Developer Tools environment.

+
    +
  • +

    GitHub account (public, not enterprise): Create one if you do not have one already. If you have not logged in for a while, make sure your login is working.

    +
  • +
  • +

    IBM Cloud Account: Create one if needed, make sure you can log in.

    +
  • +
  • +

    O'Reilly Account: The account is free and easy to create.

    +
  • +
+

Cloud Native VM

+

Use the Cloud Native VM it comes pre-installed with kubernetes and all cloud native CLIs.

+

It is highly recommended for Windows users to use this VM.

+

Install CLIs and tools

+

The following is a list of desktop tools required to help with installation and development.

+
    +
  • +

    Git Client: Needs to be installed in your development operating system, it comes as standard for Mac OS

    +
  • +
  • +

    IBM Cloud CLI: Required for management of IBM Cloud Account and management of your managed IBM Kubernetes and Red Hat OpenShift clusters

    + +
  • +
+
+

Note

+

If you log in to the web UI using SSO, you'll need to create an API key for logging into the CLI.

+
+
    +
  • +

    Podman Desktop: Required for building and running container images.

    +
      +
    • Installed and running on your local machine
    • +
    +
  • +
  • +

    Tekton CLI: Used to help control Tekton +pipelines from the command line. +

        brew tap tektoncd/tools
    +    brew install tektoncd/tools/tektoncd-cli
    +

    +
  • +
  • +

    Visual Studio Code: A popular code editor

    + +
  • +
  • +

    JDK 11: Optional installed on your local machine

    +
      +
    • Used for SpringBoot content
    • +
    +
  • +
  • +

    Minikube: Follow the instructions for your Operating System.

    +
  • +
+

+

Warning: Make sure you have Cisco VPN turned off when using minikube.

+

+
+
+
+

Environment Setup

+
+
+
+
    +
  • Verify your cluster has 4GB+ memory, and kubernetes 1.16+ +
    minikube config view
    +
  • +
  • Verify your vm-driver is set for hyperkit +
    minikube config set vm-driver hyperkit
    +
  • +
  • In case memory is not set, or need to increase set the memory and recreate the VM +
    minikube config set memory 4096
    +minikube config set kubernetes-version v1.16.6
    +minikube delete
    +minikube start
    +
  • +
  • Kubernetes should be v1.16+ +
    kubectl version
    +
  • +
+
+
+

Make sure CRC is installed. Check out the CRC Page

+

** Setup CRC ** +

crc setup
+
+** Start CRC ** +
crc start
+

+
+
+
    +
  • +

    Login to IBM Cloud with your IBM ID.

    +
  • +
  • +

    Click "Create Resource" and search for "kubernetes service".

    +
  • +
  • +

    Select the tile for "Kubernetes Service" and do the following:

    +
  • +
  • Select the "Free Cluster" plan.
  • +
  • Name your cluster.
  • +
  • +

    Select "Create" at the bottom right of the screen.

    +
  • +
  • +

    Once the Cluster is provisioned, Click on the "Connect via CLI" in the top right corner.

    +
  • +
  • +

    Follow the instructions to connect and you are set to go.

    +
  • +
+
+
+
    +
  • +

    In this approach you share an OpenShift cluster on IBM Cloud with other bootcamp attendees.

    +
  • +
  • +

    Considering 10-15 attendees we recommend a cluster with 3 worker nodes (each 8 vCPUs + 32GB RAM - b3c.8x32).

    +
  • +
  • +

    Ask your IBM cloud account owner to provide access to an OpenShift cluster.

    +
  • +
  • +

    In addition to the IBM Cloud CLI also install the OpenShift Origin CLI to be able to execute all commands.

    +
  • +
  • +

    Open your OpenShift web console from within your IBM cloud account, select your profile and choose "copy login command" to retrieve an access token for the login.

    +
  • +
  • +

    Login with your OpenShift Origin CLI. +

    oc login --token=<token> --server=<server-url>:<server-port>
    +

    +
  • +
  • +

    Create your own project / namespace in OpenShift that you will leverage across all labs. +

    oc new-project <dev-your_initials>
    +

    +
  • +
  • +

    Validate in the OpenShift web console that your project has been created (Administrator view -> Home -> Projects)

    +
  • +
+
+
+
+

Next Steps

+

Once setup is complete, you can begin reading about [Cloud Native](./cloud-native/index.md) by clicking the link, or the Next button below.

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/scripts/system-check.sh b/scripts/system-check.sh new file mode 100644 index 0000000..bcf09ce --- /dev/null +++ b/scripts/system-check.sh @@ -0,0 +1,97 @@
#!/bin/bash
#
# system-check.sh
#
# Verifies that every CLI tool needed for the Cloud Native Bootcamp is
# installed, printing a check mark for each tool found and a cross plus
# download links for each tool that is missing.
#
# The docs invoke this script via `curl ... | sh`, so it sticks to POSIX
# shell features: `command -v` is used for lookups instead of the
# bash-ism `hash`.

echo 'Running System Checks'

# check_tool CMD DISPLAY_NAME [HELP_LINE ...]
# Prints a check mark and DISPLAY_NAME when CMD is found on PATH;
# otherwise prints a cross, DISPLAY_NAME, each HELP_LINE, and a trailing
# blank line.
check_tool() {
    if command -v "$1" >/dev/null 2>&1
    then
        printf '\xE2\x9C\x85 %s \n' "$2"
    else
        printf '\xE2\x9D\x8C %s \n \n' "$2"
        shift 2
        for help_line in "$@"
        do
            printf '%s \n' "$help_line"
        done
        printf ' \n'
    fi
}

check_tool ibmcloud 'IBM Cloud CLI' \
    'Download the IBM Cloud CLI using the links below:' \
    'For All Users: https://cloud.ibm.com/docs/cli/reference/ibmcloud?topic=cloud-cli-install-ibmcloud-cli'

check_tool git 'Git CLI' \
    'Download the Git CLI using the links below:' \
    'For All Users: https://git-scm.com/book/en/v2/Getting-Started-Installing-Git'

check_tool crc 'CRC' \
    '* A RedHat Account is Required *' \
    'Download CRC using the links below:' \
    'For All Users: https://cloud.redhat.com/openshift/install/crc/installer-provisioned'

check_tool minikube 'Minikube' \
    'Download Minikube using the links below:' \
    'For All Users: https://kubernetes.io/docs/tasks/tools/install-minikube/'

check_tool docker 'Docker CLI' \
    'Download the Docker CLI using the links below:' \
    'For Mac: https://docs.docker.com/docker-for-mac/install/' \
    'For Linux Users: https://docs.docker.com/engine/install/ubuntu/' \
    'For Windows: https://docs.docker.com/docker-for-windows/install/'

check_tool kubectl 'Kubernetes CLI' \
    'Download the Kubernetes CLI using the links below:' \
    'For All Users: https://kubernetes.io/docs/tasks/tools/install-kubectl/'

check_tool oc 'Openshift CLI' \
    'Download the Openshift CLI using the links below:' \
    'For All Users: https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/'

check_tool tkn 'Tekton CLI' \
    'Download the Tekton CLI using the links below:' \
    'For All Users: https://github.com/tektoncd/cli#installing-tkn'

check_tool argocd 'Argo CLI' \
    'Download the Argo CLI using the links below:' \
    'For All Users: https://argoproj.github.io/argo-cd/cli_installation/'
 \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 0000000..953b561 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"IBM Cloud Native Bootcamp","text":""},{"location":"#concepts-covered","title":"Concepts Covered","text":"
  1. Cloud Native - open Cloud Native to read more.
  2. Containers - open Containers to read more.
  3. Kubernetes - open Kubernetes to read more.
  4. DevOps - open Devops to read more.
    • Continuous Integration (CI)
    • Continuous Deployment (CD)
"},{"location":"#test-your-knowledge","title":"Test your Knowledge","text":"

After taking the virtual bootcamp with an instructor from IBM Garage, or learning at your own pace, you can take the quizzes and even get a Badge to show off your Cloud Native street cred.

"},{"location":"agenda/","title":"Agenda","text":"

The following table lists the topics and coding activities for the week. Click on the name of the topic to open a pdf of the material. Click on the link to the solution code to view the solution.

In-PersonSelf Paced"},{"location":"agenda/#day-1","title":"Day 1","text":"Topic Type of Activity Kickoff Activity Introductions Activity Introduction Cloud Native Presentation Containers Presentation Container Activities Activity Lunch Activity Container Activities (Cont.) Activity Kubernetes Presentation Wrap up"},{"location":"agenda/#day-2","title":"Day 2","text":"Topic Type of Activity Recap and review from Monday; Q&A Presentation Kubernetes Activities Activity Lunch Activity Kubernetes Presentation Wrap up"},{"location":"agenda/#day-3","title":"Day 3","text":"Topic Type of Activity Recap and review from Tuesday; Q&A Presentation Kubernetes Activities Activity Continuous Integration Presentation Lunch Activity Continuous Integration Lab Activity Continuous Deployment Presentation Wrap up"},{"location":"agenda/#day-4","title":"Day 4","text":"Topic Type of Activity Recap and review from Wednesday; Q&A Presentation Continuous Deployment Lab Activity Lunch Project Work Activity"},{"location":"agenda/#day-5","title":"Day 5","text":"Topic Type of Activity Recap and review from Thursday ; Q&A Presentation Project Work Activity Retrospective Activity"},{"location":"agenda/#modules","title":"Modules","text":"Topic Type of Activity Duration Containers Presentation 1 Hour Container Activities Activity 30 mins Kubernetes Presentation 6 Hours Kubernetes Activities Activity 4 Hours Continuous Integration Presentation 1 Hour Continuous Integration Lab Activity 1 Hour Continuous Deployment Presentation 1 Hour Continuous Deployment Lab Activity 1 Hour Project Work Activity 2 Hours"},{"location":"cloudnative-challenge/","title":"Cloud Native Challenge","text":""},{"location":"cloudnative-challenge/#phase-1-local-develop","title":"Phase 1 - Local Develop","text":"
  • Start by creating a Github Repo for your application.
  • Choose NodeJS, Python, or React.
  • Site about one of the following:
    • Yourself
    • Hobby
    • Place you live
  • Must be able to run locally
"},{"location":"cloudnative-challenge/#application-requirements","title":"Application Requirements","text":"
  • Minimum of 3 webpages
  • Minimum of 1 GET and POST method each.
  • SwaggerUI Configured for API Testing.
  • API's exposed through Swagger
  • Custom CSS files for added formatting.
"},{"location":"cloudnative-challenge/#testing","title":"Testing","text":"

Setup each of the following tests that apply:

  • Page tests
  • API tests
  • Connection Tests
"},{"location":"cloudnative-challenge/#phase-2-application-enhancements","title":"Phase 2 - Application Enhancements","text":""},{"location":"cloudnative-challenge/#database-connectivity-and-functionality","title":"Database Connectivity and Functionality","text":"
  • Add local or cloud DB to use for data collection.
  • Use 3rd party API calls to get data.
    • Post Data to DB via API Call
    • Retrieve Data from DB via API Call
    • Delete Data from DB via API Call
"},{"location":"cloudnative-challenge/#phase-3-containerize","title":"Phase 3 - Containerize","text":""},{"location":"cloudnative-challenge/#container-image","title":"Container Image","text":"
  • Create a DockerFile
  • Build your docker image from the dockerfile
  • Run it locally via Docker Desktop or another docker engine.
"},{"location":"cloudnative-challenge/#image-registries","title":"Image Registries","text":"
  • Once validation of working docker image, push the image up to a registry.
  • Use one of the following registries:
    • Docker
    • Quay.io
    • IBM Container
  • Push the image up with the following name: {DockerRegistry}/{yourusername}/techdemos-cn:v1
"},{"location":"cloudnative-challenge/#phase-4-kubernetes-ready","title":"Phase 4 - Kubernetes Ready","text":""},{"location":"cloudnative-challenge/#create-pod-and-deployment-files","title":"Create Pod and Deployment files","text":"
  • Create a Pod YAML to validate your image.
  • Next, create a deployment yaml file with the setting of 3 replicas.
  • Verify starting of deployment
  • Push all YAML files to Github
"},{"location":"cloudnative-challenge/#application-exposing","title":"Application Exposing","text":"
  • Create a Service and Route yaml
  • Save Service and Route yamls in Github
"},{"location":"cloudnative-challenge/#configuration-setup","title":"Configuration Setup","text":"
  • Create a ConfigMap for all site configuration.
  • Setup Secrets for API keys or Passwords to 3rd parties.
  • Add storage where needed to deployment.
"},{"location":"cloudnative-challenge/#phase-5-devopsgitops","title":"Phase 5 - Devops/Gitops","text":""},{"location":"cloudnative-challenge/#tekton-pipeline-setup","title":"Tekton Pipeline Setup","text":"
  • Create a Tekton pipeline to do the following:
    • Setup
    • Test
    • Build and Push Image
    • GitOps Version Update
  • Make each of the above their own task.
  • Setup triggers to respond to Github commits and PR's
"},{"location":"cloudnative-challenge/#gitsops-configuration","title":"GitOps Configuration","text":"
  • Use ArgoCD to setup Deployment.
  • Test your ArgoCD deployment
  • Make a change to site and push them.
  • Validate new image version.
"},{"location":"cloudnative-challenge/#extras","title":"Extras","text":""},{"location":"cloudnative-challenge/#chatbot-functions","title":"Chatbot Functions","text":"
  • Watson Assistant Integration
  • Conversation about your sites topic.
  • Have Chat window or page.
  • Integrate Watson Assistant Actions.
"},{"location":"prerequisites/","title":"Prerequisites","text":""},{"location":"prerequisites/#required-skills","title":"Required skills","text":"

The activities contained here require you to be proficient in working from the command line with a Linux shell (Bash, Zsh, etc.). Below is a partial list of activities you should be able to perform.

  • Copy, move, and rename files
  • Understand linux file permissions
  • Edit text files (vi, vim, emacs, etc)
  • Edit environment variables ($PATH)

Here is a course for learning (or brushing up) on working from the linux command line Linux Command Line Basics

"},{"location":"prerequisites/#workstation-setup","title":"Workstation Setup","text":"Openshift (MacOS/Linux)Openshift (Windows)Kubernetes (MacOS/Linux)Kubernetes (Windows)"},{"location":"prerequisites/#create-accounts","title":"Create accounts","text":"

You'll need these accounts to use the Developer Tools environment.

  • GitHub account (public, not enterprise): Create one if you do not have one already. If you have not logged in for a while, make sure your login is working.

  • IBM Cloud Account: Create one if needed, make sure you can log in.

  • O'Reilly Account: The account is free and easy to create.

  • RedHat Account: Needed for CodeReady Containers.

"},{"location":"prerequisites/#run-system-check-script","title":"Run System Check Script","text":"

Run the following command in your terminal to check which tools need to be installed.

Using wget:

wget -O - https://cloudbootcamp.dev/scripts/system-check.sh | sh\n

Using curl:

curl -s https://cloudbootcamp.dev/scripts/system-check.sh | sh\n

After the script is run, make sure to install any missing tools.

"},{"location":"prerequisites/#install-clis-and-tools","title":"Install CLIs and tools","text":"

The following is a list of desktop tools required to help with installation and development.

  • Git Client: Needs to be installed in your development operating system, it comes as standard for Mac OS

  • IBM Cloud CLI: Required for management of IBM Cloud Account and management of your managed IBM Kubernetes and Red Hat OpenShift clusters

    • Don't install just the IBM Cloud CLI, install the IBM Cloud CLI and Developer Tools
      curl -sL https://ibm.biz/idt-installer | bash\n

Note

If you log in to the web UI using SSO, you'll need to create an API key for logging into the CLI.

  • Podman Desktop: Required for building and running container images.

    • Installed and running on your local machine
  • Tekton CLI: Used to help control Tekton pipelines from the command line.

        brew tap tektoncd/tools\n    brew install tektoncd/tools/tektoncd-cli\n

  • Visual Studio Code: A popular code editor

    • You will be required to edit some files, having a good quality editor is always best practice
    • Enabling launching VSCode from a terminal
  • JDK 11: Optional installed on your local machine

    • Used for SpringBoot content
"},{"location":"prerequisites/#create-accounts_1","title":"Create accounts","text":"

You'll need these accounts to use the Developer Tools environment.

  • GitHub account (public, not enterprise): Create one if you do not have one already. If you have not logged in for a while, make sure your login is working.

  • IBM Cloud Account: Create one if needed, make sure you can log in.

  • O'Reilly Account: The account is free and easy to create.

  • RedHat Account: Needed for CodeReady Containers.

"},{"location":"prerequisites/#cloud-native-vm","title":"Cloud Native VM","text":"

Use the Cloud Native VM it comes pre-installed with kubernetes and all cloud native CLIs.

It is highly recommended that Windows users use this VM.

"},{"location":"prerequisites/#install-clis-and-tools_1","title":"Install CLIs and tools","text":"

The following is a list of desktop tools required to help with installation and development.

  • Git Client: Needs to be installed in your development operating system, it comes as standard for Mac OS

  • IBM Cloud CLI: Required for management of IBM Cloud Account and management of your managed IBM Kubernetes and Red Hat OpenShift clusters

    • Don't install just the IBM Cloud CLI, install the IBM Cloud CLI and Developer Tools
      curl -sL https://ibm.biz/idt-installer | bash\n

Note

If you log in to the web UI using SSO, you'll need to create an API key for logging into the CLI.

  • Podman Desktop: Required for building and running container images.

    • Installed and running on your local machine
  • Tekton CLI: Used to help control Tekton pipelines from the command line.

  • Visual Studio Code: A popular code editor

    • You will be required to edit some files, having a good quality editor is always best practice
    • Enabling launching VSCode from a terminal
  • JDK 11: Optional installed on your local machine

    • Used for SpringBoot content
  • OpenShift CodeReady Containers (CRC)

Warning: Make sure you have Cisco VPN turned off when using CRC.

"},{"location":"prerequisites/#create-accounts_2","title":"Create accounts","text":"

You'll need these accounts to use the Developer Tools environment.

  • GitHub account (public, not enterprise): Create one if you do not have one already. If you have not logged in for a while, make sure your login is working.

  • IBM Cloud Account: Create one if needed, make sure you can log in.

  • O'Reilly Account: The account is free and easy to create.

"},{"location":"prerequisites/#run-system-check-script_1","title":"Run System Check Script","text":"

Run the following command in your terminal to check which tools need to be installed.

Using wget:

wget -O - https://cloudbootcamp.dev/scripts/system-check.sh | sh\n

Using curl:

curl -s https://cloudbootcamp.dev/scripts/system-check.sh | sh\n

After the script is run, make sure to install any missing tools.

"},{"location":"prerequisites/#install-clis-and-tools_2","title":"Install CLIs and tools","text":"

The following is a list of desktop tools required to help with installation and development.

  • Git Client: Needs to be installed in your development operating system, it comes as standard for Mac OS

  • IBM Cloud CLI: Required for management of IBM Cloud Account and management of your managed IBM Kubernetes and Red Hat OpenShift clusters

    • Don't install just the IBM Cloud CLI, install the IBM Cloud CLI and Developer Tools
      curl -sL https://ibm.biz/idt-installer | bash\n

!!! Note If you log in to the web UI using SSO, you'll need to create an API key for logging into the CLI.

  • Podman Desktop: Required for building and running container images.

    • Installed and running on your local machine
  • Tekton CLI: Used to help control Tekton pipelines from the command line.

        brew tap tektoncd/tools\n    brew install tektoncd/tools/tektoncd-cli\n

  • Visual Studio Code: A popular code editor

    • You will be required to edit some files, having a good quality editor is always best practice
    • Enabling launching VSCode from a terminal
  • JDK 11: Optional installed on your local machine

    • Used for SpringBoot content
  • Minikube: Follow the instructions for your Operating System.

Warning: Make sure you have Cisco VPN turned off when using minikube.

"},{"location":"prerequisites/#create-accounts_3","title":"Create accounts","text":"

You'll need these accounts to use the Developer Tools environment.

  • GitHub account (public, not enterprise): Create one if you do not have one already. If you have not logged in for a while, make sure your login is working.

  • IBM Cloud Account: Create one if needed, make sure you can log in.

  • O'Reilly Account: The account is free and easy to create.

"},{"location":"prerequisites/#cloud-native-vm_1","title":"Cloud Native VM","text":"

Use the Cloud Native VM it comes pre-installed with kubernetes and all cloud native CLIs.

It is highly recommended that Windows users use this VM.

"},{"location":"prerequisites/#install-clis-and-tools_3","title":"Install CLIs and tools","text":"

The following is a list of desktop tools required to help with installation and development.

  • Git Client: Needs to be installed in your development operating system, it comes as standard for Mac OS

  • IBM Cloud CLI: Required for management of IBM Cloud Account and management of your managed IBM Kubernetes and Red Hat OpenShift clusters

    • Don't install just the IBM Cloud CLI, install the IBM Cloud CLI and Developer Tools
      curl -sL https://ibm.biz/idt-installer | bash\n

Note

If you log in to the web UI using SSO, you'll need to create an API key for logging into the CLI.

  • Podman Desktop: Required for building and running container images.

    • Installed and running on your local machine
  • Tekton CLI: Used to help control Tekton pipelines from the command line.

        brew tap tektoncd/tools\n    brew install tektoncd/tools/tektoncd-cli\n

  • Visual Studio Code: A popular code editor

    • You will be required to edit some files, having a good quality editor is always best practice
    • Enabling launching VSCode from a terminal
  • JDK 11: Optional installed on your local machine

    • Used for SpringBoot content
  • Minikube: Follow the instructions for your Operating System.

Warning: Make sure you have Cisco VPN turned off when using minikube.

"},{"location":"prerequisites/#environment-setup","title":"Environment Setup","text":"MiniKubeCRC (MiniShift)IKSOpenShift on IBM Cloud (4.x)
  • Verify your cluster has 4GB+ memory, and kubernetes 1.16+
    minikube config view\n
  • Verify your vm-driver is set for hyperkit
    minikube config set vm-driver hyperkit\n
  • In case memory is not set, or need to increase set the memory and recreate the VM
    minikube config set memory 4096\nminikube config set kubernetes-version v1.16.6\nminikube delete\nminikube start\n
  • Kubernetes should be v1.15+
    kubectl version\n

Make sure CRC is installed. Check out the CRC Page

** Setup CRC **

crc setup\n
** Start CRC **
crc start\n

  • Login to IBM Cloud with your IBM ID.

  • Click \"Create Resource\" and search for \"kubernetes service\".

  • Select the tile for \"Kubernetes Service\" and do the following:

  • Select the \"Free Cluster\" plan.
  • Name your cluster.
  • Select \"Create\" at the bottom right of the screen.

  • Once the Cluster is provisioned, Click on the \"Connect via CLI\" in the top right corner.

  • Follow the instructions to connect and you are set to go.

  • In this approach you share an OpenShift cluster on IBM Cloud with other bootcamp attendees.

  • Considering 10-15 attendees we recommend a cluster with 3 worker nodes (each 8 vCPUs + 32GB RAM - b3c.8x32).

  • Ask your IBM cloud account owner to provide access to an OpenShift cluster.

  • In addition to the IBM Cloud CLI also install the OpenShift Origin CLI to be able to execute all commands.

  • Open your OpenShift web console from within your IBM cloud account, select your profile and choose \"copy login command\" to retrieve an access token for the login.

  • Login with your OpenShift Origin CLI.

    oc login --token=<token> --server=<server-url>:<server-port>\n

  • Create your own project / namespace in OpenShift that you will leverage across all labs.

    oc new-project <dev-your_initials>\n

  • Validate in the OpenShift web console that your project has been created (Administrator view -> Home -> Projects)

"},{"location":"prerequisites/#next-steps","title":"Next Steps","text":"

Once setup is complete, you can begin reading about [Cloud Native](./cloud-native/index.md) by clicking the link, or the Next button below.

"},{"location":"cloud-native/","title":"Cloud-Native","text":""},{"location":"cloud-native/#introduction","title":"Introduction","text":"

Cloud is everywhere. Today, many companies want to migrate their applications on to cloud. For this migration to be done, the applications must be re-architected in a way that they fully utilize the advantages of the cloud.

"},{"location":"cloud-native/#presentations","title":"Presentations","text":"

Cloud-Native Presentation

"},{"location":"cloud-native/#what-is-cloud-native","title":"What is Cloud-Native?","text":"

Cloud-native is about how we build and run applications taking full advantage of cloud computing rather than worrying about where we deploy it.

Cloud-native refers less to where an application resides and more to how it is built and deployed.

  • A cloud-native application consists of discrete, reusable components known as microservices that are designed to integrate into any cloud environment.

  • These microservices act as building blocks and are often packaged in containers.

  • Microservices work together as a whole to comprise an application, yet each can be independently scaled, continuously improved, and quickly iterated through automation and orchestration processes.

  • The flexibility of each microservice adds to the agility and continuous improvement of cloud-native applications.

CNCF Cloud Native Definition

Cloud native technologies empower organizations to build and run scalable applications in modern, dynamic environments such as public, private, and hybrid clouds. Containers, service meshes, microservices, immutable infrastructure, and declarative APIs exemplify this approach.

These techniques enable loosely coupled systems that are resilient, manageable, and observable. Combined with robust automation, they allow engineers to make high-impact changes frequently and predictably with minimal toil.

The Cloud Native Computing Foundation seeks to drive adoption of this paradigm by fostering and sustaining an ecosystem of open source, vendor-neutral projects. We democratize state-of-the-art patterns to make these innovations accessible for everyone.

"},{"location":"cloud-native/#why-cloud-native","title":"Why Cloud-Native?","text":"

Cloud-native applications are different from the traditional applications that run in your data centres. The applications that are designed in the traditional way are not built keeping cloud compatibility in mind. They may have strong ties with the internal systems. Also, they cannot take advantage of all the benefits of the cloud.

So, we need a new architecture for our applications to utilize the benefits of cloud. There is a need to design the applications keeping cloud in mind and take advantage of several cloud services like storage, queuing, caching etc.

  • Speed, safety, and scalability comes with cloud-native applications.

  • Helps you to quickly deliver the advancements.

  • Allows you to have loose ties into the corporate IT where it most certainly would destabilize legacy architectures.

  • Helps you to continuously deliver your applications with zero downtime.

  • Infrastructure is less predictable.

  • Service instances are all disposable.

  • Deployments are immutable.

  • To meet the expectations of the today\u2019s world customers, these systems are architected for elastic scalability.

"},{"location":"cloud-native/#cloud-native-concepts","title":"Cloud-native concepts","text":"

Some of the important characteristics of cloud-native applications are as follows.

  • Disposable Infrastructure

  • Isolation

  • Scalability

  • Disposable architecture

  • Value added cloud services

  • Polyglot cloud

  • Self-sufficient, full-stack teams

  • Cultural Change

Disposable Infrastructure

While creating applications on cloud, you need several cloud resources as part of it. We often hear how easy it is to create all these resources. But did you ever think how easy is it to dispose them. It is definitely not that easy to dispose them and that is why you don\u2019t hear a lot about it.

In traditional or legacy applications, we have all these resources residing on machines. If these go down, we need to redo them again and most of this is handled by the operations team manually. So, when we are creating applications on cloud, we bring those resources like load balancers, databases, gateways, etc on to cloud as well along with machine images and containers.

While creating these applications, you should always keep in mind that if you are creating a resource when required, you should also be able to destroy it when not required. Without this, we cannot achieve the factors speed, safety and scalability. If you want this to happen, we need automation.

Automation allows you to

  • Deliver new features at any time.

  • Deliver patches faster.

  • Improves the system quality.

  • Facilitates team scale and efficiency.

Now you know what we are talking about. Disposable infrastructure is nothing but Infrastructure as Code.

Infrastructure as Code

Here, you develop the code for automation exactly the same as you do for the rest of the application, using agile methodologies.

  • Automation code is driven by a story.

  • Versioned in the same repository as rest of the code.

  • Continuously tested as part of CI/CD pipeline.

  • Test environments are created and destroyed along with test runs.

Thus, disposable infrastructure lays the ground work for scalability and elasticity.

Isolation

In traditional or legacy applications, the applications are monoliths. So, when there is a bug or error in the application, you need to fix it. Once you change the code, the entire application must be redeployed. Also, there may be side effects which you can never predict. New changes may break any components in the application as they are all interrelated.

In cloud-native applications, to avoid the above scenario, the system is decomposed into bounded isolated components. Each service will be defined as one component and they are all independent of each other. So, in this case, when there is a bug or error in the application, you know which component to fix and this also avoids any side effects as the components are all unrelated pieces of code.

Thus, cloud-native systems must be resilient to man made errors. To achieve this we need isolation and this avoids a problem in one component affecting the entire system. Also, it helps you to introduce changes quickly in the application with confidence.

Scalability

Simply deploying your application on cloud does not make it cloud-native. To be cloud native it should be able to take full benefits of the cloud. One of the key features is Scalability.

In today\u2019s world, once your business starts growing, the number of users keeps increasing and they may be from different locations. Your application should be able to support a growing number of devices and it should also be able to maintain its responsiveness. Moreover, this should be efficient and cost-effective.

To achieve this, cloud native application runs in multiple runtimes spread across multiple hosts. The applications should be designed and architected in a way that they support multi regional, active-active deployments. This helps you to increase the availability and avoids single point of failures.

Disposable architecture

Leveraging the disposable infrastructure and scaling isolated components is important for cloud native applications. Disposable architecture is based on this and it takes the idea of disposability and replacement to the next level.

Most of us think in a monolithic way because we got used to traditional or legacy applications a lot. This may lead us to take decisions in monolithic way rather than in cloud native way. In monoliths, we tend to be safe and don\u2019t do a lot of experimentation. But Disposable architecture is exactly opposite to monolithic thinking. In this approach, we develop small pieces of the component and keep experimenting with it to find an optimal solution.

When there is a breakthrough in the application, you can\u2019t simply take decisions based on the available information which may be incomplete or inaccurate. So, with disposable architecture, you start with small increments, and invest time to find the optimal solution. Sometimes, there may be a need to completely replace the component, but that initial work was just the cost of getting the information that caused the breakthrough. This helps you to minimize waste allowing you to use your resources on controlled experiments efficiently and get good value out of it in the end.

Value added cloud services

When you are defining an application, there are many things you need to care of. Each and every service will be associated with many things like databases, storage, redundancy, monitoring, etc. For your application, along with your components, you also need to scale the data. You can reduce the operational risk and also get all such things at greater velocity by leveraging the value-added services that are available on cloud. Sometimes, you may need third party services if they are not available on your cloud. You can externally hook them up with your application as needed.

By using the value added services provided by your cloud provider, you will get to know all the available options on your cloud and you can also learn about all the new services. This will help you to take good long-termed decisions. You can definitely exit the service if you find something more suitable for your component and hook that up with your application based on the requirements.

Polyglot cloud

Most of you are familiar with Polyglot programming. For your application, based on the component, you can choose the programming languages that best suits it. You need not stick to a single programming language for the entire application. If you consider Polyglot persistence, the idea is choose the storage mechanism that suits better on a component by component basis. It allows a better global scale.

Similarly, the next thing will be Polyglot cloud. Like above, here you choose a cloud provider that better suits on a component by component basis. For majority of your components, you may have a go to cloud provider. But, this does not stop you from choosing a different one if it suits well for any of your application components. So, you can run different components of your cloud native system on different cloud providers based on your requirements.

Self-sufficient, full-stack teams

In a traditional set up, many organizations have teams based on skill set like backend, user interface, database, operations etc. Such a structure will not allow you to build cloud native systems.

In cloud native systems, the system is composed of bounded isolated components. They have their own resources. Each of such component must be owned by self-sufficient, full stack team. That team is entirely responsible for all the resources that belong to that particular component. In this set up, team tends to build quality up front in as they are the ones who deploy it and they will be taking care of it if the component is broken. It is more like you build it and then you run it. So, the team can continuously deliver advancements to the components at their own pace. Also, they are completely responsible for delivering it safely.

Cultural Change

Cloud native is different way of thinking. We need to first make up our minds, not just the systems, to utilize the full benefits of cloud. Compared to the traditional systems, there will be lots of things we do differently in cloud-native systems.

To make that happen, cultural change is really important. To change the thinking at a high level, we first need to prove that the low level practices can truly deliver and encourage lean thinking. With this practice, you can conduct experimentation. Based on the feedback from business, you can quickly and safely deliver your applications that can scale.

"},{"location":"cloud-native/#cloud-native-roadmap","title":"Cloud-native Roadmap","text":"

You can define your cloud native road map in many ways. You can get there by choosing different paths. Let us see the trail map defined by CNCF.

CNCF defined the Cloud Native Trail Map providing an overview for enterprises starting their cloud native journey as follows.

This cloud map gives us various steps that an engineering team may use while considering the cloud native technologies and exploring them. The most common ones among them are Containerization, CI/CD, and Orchestration. Next crucial pieces will be Observability & Analysis and Service Mesh. And later comes the rest of them like Networking, Distributed Database, Messaging, Container runtime, and software distribution based on your requirements.

  • Without containerization, you cannot build cloud native applications. Containerization helps your application run in any computing environment. Basically, all your code and dependencies are packaged together into a single unit. Among the different container platforms available, Docker is a preferred one.

  • To bring all the changes in the code to container automatically, it is nice to set up a CI/CD pipeline which does that. There are many tools available like jenkins, travis, etc.

  • Since we have containers, we need container orchestration to manage the container lifecycles. Currently, Kubernetes is one solution which is popular.

  • Monitoring and Observability plays a very important role. It is good to set up some of them like logging, tracing, metrics etc.

  • To enable more complex operational requirements, you can use a service mesh. It helps you out with several things like service discovery, health, routing, A/B testing etc. Istio is one of the examples of service mesh.

  • Networking plays a crucial role. You should define flexible networking layers based on your requirements. For this, you can use Calico, Weave Net etc.

  • Sometimes, you may need distributed databases. Based on your requirements, if you need more scalability and resiliency, these are required.

  • Messaging may be required sometimes too. Go with different messaging queues like Kafka, RabbitMQ etc available when you need them.

  • Container Registry helps you to store all your containers. You can also enable image scanning and signing if required.

  • As a part of your application, sometimes you may need a secure software distribution.

Also, if you want to see the cloud native landscape, check it out here.

"},{"location":"cloud-native/#summary","title":"Summary","text":"

In this, we covered the fundamentals of cloud native systems. You now know what cloud native is, why we need it and how it is important. Cloud native is not just deploying your application on cloud but it is more of taking full advantages of cloud. Also, from cloud-native roadmap, you will get an idea on how to design and architect your cloud-native system. You can also get the idea of different tools, frameworks, platforms etc from the cloud-native landscapes.

Also, if you are interested in knowing more, we have Cloud-Native: A Complete Guide. Feel free to check this out.

"},{"location":"cloud-native/#references","title":"References","text":"
  • Learn Cloud-native

  • John Gilbert, (2018). Cloud Native Development Patterns and Best Practices. Publisher: Packt Publishing

  • CNCF landscape

  • CNCF Definition

"},{"location":"cloud-native/app-dev/","title":"Cloud Native Application Development","text":""},{"location":"cloud-native/app-dev/#introduction","title":"Introduction","text":"

Cloud native is all about the concepts that are used for building and deploying your applications on any cloud platform. It involves many things like adopting microservices architecture, containerization, orchestration etc.

Cloud native applications will be managed by the infrastructure which in turn is managed by applications. In this installment, let us see what you need to include in your cloud native applications and how to run them on the cloud infrastructure.

"},{"location":"cloud-native/app-dev/#application-design","title":"Application Design","text":"

An application is called cloud native if it is designed in a way such that it takes advantage of most of the benefits of the cloud. So, these applications are all managed by software like mentioned before.

Let's say we have an application packaged in a container and running on Kubernetes. This application does not accept any runtime configuration. There is no logging defined. Some of the configurations like database IP, credentials etc are hardcoded.

What happens if the application stops working ? How are you going to debug it ?

Will you call such an application cloud-native ? Definitely not.

Containers and Kubernetes help you to run the applications smoothly on cloud infrastructure. Along with this, you also need to know how to effectively build and manage these applications.

"},{"location":"cloud-native/app-dev/#cloud-native-capabilities","title":"Cloud native Capabilities","text":"

While developing cloud native applications, some of the capabilities that we include in the applications are as follows.

  • Configuration
  • Health Checks
  • Logging
  • Metrics
  • Resiliency
  • Service discovery

Usually, you can implement them in your applications in two ways.

  • Using the language libraries
  • Using sidecar

To implement these capabilities, you can import the language libraries into your application, which will automatically get you most of these capabilities without defining any extra code. This is easy to do and is well suited if you have only a few languages in your applications.

But if your application is polyglot, with many different languages, it is difficult to manage all the libraries. In such cases, you can use a sidecar, which is implemented as a separate service and includes things like a health endpoint monitoring the health of the application, a configuration watcher watching for changes in the configs and reloading the application when required, a registrator for service discovery, an Envoy proxy to handle resiliency and metrics, etc.

"},{"location":"cloud-native/app-dev/#application-lifecycle","title":"Application Lifecycle","text":"

Deploy

Deploying your cloud native application is not just taking the existing code and running it on the cloud. Cloud native applications are defined in a way such that software can manage them. For this, make sure you have the below embedded in your application.

  • Continuous integration
  • Continuous deployment
  • Health checks

Deployments for your application should be automated, tested and verified. If you are introducing new features to your applications, you should be able to deploy them dynamically without restarting your applications. Also, when you are planning on a new feature or a new version to be deployed, make sure you have traffic control mechanisms in place which allows you to route the traffic towards or away from the application as per your requirements to reduce the outage impact.

Run

Running your application is one of the most important phases in the application lifecycle. While running the application, two most important aspects to keep in mind are

  • Observability
  • Operability

While running your application, you need to understand what the application is doing, which is observability, and you should also be able to change the application as needed, which is operability.

When your application is not meeting the SLO or is broken, what do you do ? In a cloud native application, we follow the below steps to see where the problem resides.

  1. Verify infrastructure tests
  2. Application debugging - This can be done by using application performance monitoring (APM), distributed tracing etc.
  3. More verbose Logging

In today's world, as the business keeps increasing, the application grows and you need to make sure that you defined a proper debugging strategy for your application which makes it easy to dynamically debug the applications similar to how we dynamically deploy them.

One more important thing to remember is that it is always easy to push new applications, but the converse is not true. Though that is the case, it is still very important to retire the old applications that are not in use.

Retire

In cloud-native applications, all the new services are deployed automatically. Also, the services are monitored automatically using the monitoring mechanisms in place.

Don't you think the services should be retired in the same way too ?

Deploying new services without cleaning up the old ones that are no longer in use accrues a lot of technical debt. So, make sure your application includes a telemetry mechanism which helps you to identify whether a service is being used. If not, the decision should be made by the business to keep it or retire it.

"},{"location":"cloud-native/app-dev/#twelve-factor-design-methodology","title":"Twelve factor design methodology","text":"
  • Code base - One code base tracked in revision control, many deploys.
  • Dependencies - Explicitly declare and isolate dependencies.
  • Config - Store config in the environment.
  • Backing services - Treat backing services as attached resources.
  • Build, release, run - Strictly separate build and run stages.
  • Processes - Execute the app as one (or more) stateless process(es).
  • Port binding - Export services through port binding.
  • Concurrency - Scale-out through the process model.
  • Disposability - Maximize robustness with fast startup and graceful shutdown.
  • Dev/prod parity - Keep development, staging, and production as similar as possible.
  • Logs - Treat logs as event streams.
  • Admin processes - Run admin/management tasks as one-off processes.
"},{"location":"cloud-native/app-dev/#application-requirements","title":"Application Requirements","text":"

Runtime and Isolation

Your applications must be isolated from the operating system. You should be able to run them anywhere. This allows you to run multiple applications on the same server and also allows you to control their dependencies and resources.

One way to achieve this is containerization. Among the different container options, Docker is popular. Container is nothing but a way to package your application and run it in an isolated environment. While developing the applications, also make sure all the dependencies are declared in your application before packaging it.

Resource Allocation and Scheduling

Your applications must include dynamic scheduling. This helps you to figure out where the application must run, and these decisions are automatically made for you by the scheduler. The scheduler collects resource information from the different systems and chooses the right place to run the application. An operator can override the decisions of the scheduler if needed.

Environment isolation

You need proper environment isolation to differentiate dev, test, stage, production, etc. based on your requirements. Without completely duplicating your cluster, the infrastructure should be able to separate the dependencies across different application environments.

These environments should include all of the resources like databases, network resources etc. needed by the application. Cloud native infrastructure can create environments with very low overhead.

Service discovery

In your application, there may be multiple services. These services may depend on one another. How will they find each other if one service needs to communicate with other ? For this, the infrastructure should provide a way for services to find each other.

This may be in different ways. It can be using API calls or using DNS or with network proxies. There should be a service discovery mechanism in place and how you do this does not matter.

Usually, cloud native applications make use of their infrastructure for service discovery to identify the dependent services. Some examples are cloud metadata services, DNS, etcd, and Consul.

State Management

While defining your cloud native application, you should provide a mechanism to check the status of the application. This can be done by an API or hook that checks the current state of the application like if it is submitted, Scheduled, ready, healthy, unhealthy, terminating etc.

We usually have such capabilities in any of the orchestration platform we use. For example, if you consider Kubernetes, you can do this using events, probes and hooks. When the application is submitted, scheduled, or scaled, the event is triggered. Readiness probe checks if the application is ready and liveness probes checks if the application is healthy. Hooks are used for events that need to happen before or after processes start.

Monitoring and logging

Monitoring and logging should be a part of the cloud-native application. Dynamically monitoring all the services of the application is important. It keeps checking the entire application and is used for debugging purposes when required. Also, make sure your logging system is able to collect all the logs and consolidate them together based on application, environment, tags, etc.

Metrics

Cloud-native applications must include metrics as a part of their code. All the telemetry data needed will be provided by the metrics. This helps you to know whether your application is meeting the service-level objectives.

Metrics are collected at instance level and later aggregated together to provide the complete view of the application. Once the application provides metrics, underlying infrastructure will scrape them out and use them for analysis.

Debugging and tracing

When an application is deployed and a problem occurs, we refer to the logging system. But if that does not resolve the issue, we need distributed tracing. Distributed tracing helps us to understand what is happening in the application. It allows us to debug problems by providing a visual interface, which is different from the details we get from logging. Also, it provides shorter feedback loops which help you to debug distributed systems easily.

Application tracing is always important and make sure it is a part of your cloud-native application. If in case you cannot include it in the application, you can also enable it at infrastructure level using proxies or traffic analysis.

"},{"location":"cloud-native/app-dev/#conclusion","title":"Conclusion","text":"

We discussed the cloud-native application design, implementations of cloud native patterns, and the application life cycle. We also saw how we can design our cloud native applications using the twelve factor methodology. Along with this, we also explored what we need to include in our cloud native application while building it.

"},{"location":"cloud-native/app-dev/#references","title":"References","text":"
  • https://learning.oreilly.com/library/view/managing-cloud-native/9781492037071/[Justin Garrison, Kris Nova, (2018). Managing cloud native applications. Publisher: O'Reilly Media, Inc.]
  • https://learning.oreilly.com/library/view/cloud-native-architectures/9781787280540/[Piyum Zonooz, Erik Farr, Kamal Arora, Tom Laszewski, (2018). Cloud Native Architectures. Publisher: Packt Publishing]
  • https://12factor.net/codebase[12factor.net]
"},{"location":"containers/","title":"Containers Introduction","text":"

You wanted to run your application on different computing environments. It may be your laptop, test environment, staging environment or production environment.

So, when you run it on these different environments, will your application work reliably ?

What if some underlying software changes ? What if the security policies are different ? or something else changes ?

To solve these problems, we need Containers.

"},{"location":"containers/#containers","title":"Containers","text":"

Containers are a standard way to package an application and all its dependencies so that it can be moved between environments and run without change. They work by hiding the differences between applications inside the container so that everything outside the container can be standardized.

For example, Docker created standard way to create images for Linux Containers.

"},{"location":"containers/#presentations","title":"Presentations","text":"

Container Basics

"},{"location":"containers/#why-containers","title":"Why containers ?","text":"
  • We can run them anywhere.
  • They are lightweight .
  • Isolate your application from others.
"},{"location":"containers/#different-container-standards","title":"Different Container Standards","text":"

There are many different container standards available today. Some of them are as follows.

Docker - The most common standard, made Linux containers usable by the masses.

Rocket (rkt) - An emerging container standard from CoreOS, the company that developed etcd.

Garden - The format Cloud Foundry builds using buildpacks.

Among them, Docker was one of the most popular mainstream container software tools.

Open Container Initiative (OCI)

A Linux Foundation project developing a governed container standard. Docker and Rocket are OCI-compliant. But, Garden is not.

"},{"location":"containers/#benefits","title":"Benefits","text":"
  • Lightweight
  • Scalable
  • Efficient
  • Portable
  • Supports agile development

To know more about Containerization, we have couple of guides. Feel free to check them out.

  • Containerization: A Complete Guide.
  • Containers: A Complete Guide.
"},{"location":"containers/#docker","title":"Docker","text":"

Docker is one of the most popular Containerization platforms which allows you to develop, deploy, and run application inside containers.

  • It is an open source project.
  • Can run it anywhere.

An installation of Docker includes an engine. This comes with a daemon, REST APIs, and CLI. Users can use CLI to interact with the docker using commands. These commands are sent to the daemon which listens for the Docker Rest APIs which in turn manages images and containers. The engine runs a container by retrieving its image from the local system or registry. A running container starts one or more processes in the Linux kernel.

"},{"location":"containers/#docker-image","title":"Docker Image","text":"

A read-only snapshot of a container that is stored in Docker Hub or in private repository. You use an image as a template for building containers.

These images are build from the Dockerfile.

Dockerfile

  • It is a text document that contains all the instructions that are necessary to build a docker image.
  • It is written in an easy-to-understand syntax.
  • It specifies the operating system.
  • It also includes things like environmental variables, ports, file locations etc.

If you want to try building docker images, try this course on O'Reilly (Interactive Learning Platform).

  • Building Container Images - Estimated Time: 12 minutes.
"},{"location":"containers/#docker-container","title":"Docker Container","text":"

The standard unit where the application service is located or transported. It packages up all code and its dependencies so that the application runs quickly and reliably from one computing environment to another.

If you want to try deploying a docker container, try this course on O'Reilly (Interactive Learning Platform).

"},{"location":"containers/#docker-engine","title":"Docker Engine","text":"

Docker Engine is a program that creates, ships, and runs application containers. The engine runs on any physical or virtual machine or server locally, in private or public cloud. The client communicates with the engine to run commands.

If you want to learn more about docker engines, try this course on O'Reilly

"},{"location":"containers/#docker-registry","title":"Docker Registry","text":"

The registry stores, distributes, and shares container images. It is available in software as a service (SaaS) or in an enterprise to deploy anywhere you that you choose.

Docker Hub is a popular registry. It is a registry which allows you to download docker images which are built by different communities. You can also store your own images there. You can check out various images available on docker hub here.

"},{"location":"containers/#references","title":"References","text":"
  • Docker resources
  • Docker tutorial
  • The Evolution of Linux Containers and Their Future
  • Open Container Initiative (OCI)
  • Cloud Native Computing Foundation (CNCF)
  • Demystifying the Open Container Initiative (OCI) Specifications
"},{"location":"containers/imageregistry/","title":"Image Registries","text":"

A registry is a repository used to store and access container images. Container registries can support container-based application development, often as part of DevOps processes.

Container registries save developers valuable time in the creation and delivery of cloud-native applications, acting as the intermediary for sharing container images between systems. They essentially act as a place for developers to store container images and share them out via a process of uploading (pushing) to the registry and downloading (pulling) into another system, like a Kubernetes cluster.

Learn More

Tutorial

Make sure you have Docker Desktop installed and up and running.

Login to Quay
docker login quay.io\nUsername: your_username\nPassword: your_password\nEmail: your_email\n

First we'll create a container with a single new file based off of the busybox base image: Create a new container

docker run busybox echo \"fun\" > newfile\n
The container will immediately terminate, so we'll use the command below to list it:
docker ps -l\n
The next step is to commit the container to an image and then tag that image with a relevant name so it can be saved to a repository.

Replace \"container_id\" with your container id from the previous command. Create a new image

docker commit container_id quay.io/your_username/repository_name\n
Be sure to replace \"your_username\" with your quay.io username and \"repository_name\" with a unique name for your repository.

Now that we've tagged our image with a repository name, we can push the repository to Quay Container Registry: Push the image to Quay

docker push quay.io/your_username/repository_name\n
Your repository has now been pushed to Quay Container Registry!

To view your repository, click on the button below:

Repositories

"},{"location":"containers/reference/","title":"Containers","text":"

Containers are a standard way to package an application and all its dependencies so that it can be moved between environments and run without change. They work by hiding the differences between applications inside the container so that everything outside the container can be standardized.

For example, Docker created standard way to create images for Linux Containers.

"},{"location":"containers/reference/#basic-docker-commands","title":"Basic Docker Commands","text":"Action Command Get Docker version docker version Run hello-world Container docker run hello-world List Running Containers docker ps Stop a container docker stop <container-name/container-id> List Docker Images docker images Login into registry docker login Build an image docker build -t <image_name>:<tag> . Inspect a docker object docker inspect <name/id> Inspect a docker image docker inspect image <name/id> Pull an image docker pull <image_name>:<tag> Push an Image docker push <image_name>:<tag> Remove a container docker rm <container-name/container-id>"},{"location":"containers/reference/#running-docker","title":"Running Docker","text":"Local DockerIBM Cloud
  1. Install Docker Desktop

  2. Test it out

  1. Install ibmcloud CLI

    curl -fsSL https://clis.cloud.ibm.com/install/osx | sh\n

  2. Verify installation

    ibmcloud help\n

  3. Configure environment. Go to cloud.ibm.com -> click on your profile -> Log into CLI and API and copy IBM Cloud CLI command. It will look something like this:

    ibmcloud login -a https://cloud.ibm.com -u passcode -p <password>\n

  4. Log into docker through IBM Cloud

    ibmcloud cr login --client docker\n

"},{"location":"containers/reference/#activities","title":"Activities","text":"

| Task | Description | Link | Time | | ----------------------- | --------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------- | ------ | | | | | IBM Container Registry | Build and Deploy Run using IBM Container Registry | IBM Container Registry | 30 min | | Docker Lab | Running a Sample Application on Docker | Docker Lab | 30 min |

Once you have completed these tasks, you should have a base understanding of containers and how to use Docker.

"},{"location":"devops/","title":"DevOps Introduction","text":"

DevOps has recently become a popular buzzword in the cloud world. It varies from business to business and means a lot of different things to different people. In traditional IT, organizations have separate teams for development and operations. The development team is responsible for coding and the operations team is responsible for releasing it to production. With these two different teams, there will always be some sort of differences. They may be due to the usage of different system environments, software libraries, etc. In order to bridge this gap, DevOps came into play.

"},{"location":"devops/#what-is-devops","title":"What is DevOps ?","text":"

\u201cDevOps is a philosophy, a cultural shift that merges operations with development and demands a linked toolchain of technologies to facilitate collaborative change. DevOps toolchains \u2026 can include dozens of non-collaborative tools, making the task of automation a technically complex and arduous one.\u201d - Gartner

These days every business has critical applications which can never go down. Some of the examples are as follows.

In order to make sure that these applications are up and running smoothly, we need DevOps.

Adopting DevOps allows enterprises to create, maintain and improve their applications at a faster pace than the traditional methods. Today, most of the global organizations adopted DevOps.

"},{"location":"devops/#presentations","title":"Presentations","text":"

Tekton Overview

GitOps Overview

"},{"location":"devops/#benefits-of-devops","title":"Benefits of DevOps","text":"
  • Continuous software delivery
  • High quality software
  • Increased speed and faster problem resolution
  • Increased reliability
  • Easier to manage the software
  • Collaboration and enhanced team communication
  • Customer satisfaction etc.
"},{"location":"devops/#understanding-devops","title":"Understanding DevOps","text":"

As we mentioned before, development teams and operations teams are often in conflict with each other. Developers keep changing the software to include new features, whereas operations engineers want to keep the system stable.

  • Their goals are different.
  • They use different processes.
  • They use different tools.

All these may be different reasons for the gap between these two teams.

To solve this gap between the two teams, we need DevOps. It closes the gap by aligning incentives and sharing approaches for tools and processes. It helps us to streamline the software delivery process. From the time we begin the project till its delivery, it helps us to improve the cycle time by emphasizing the learning by gathering feedback from production to development.

It includes several aspects like the below.

  • Automation - It is quite essential for DevOps. It helps us to gather quick feedback.
  • Culture - Processes and tools are important. But, people are always more important.
  • Measurement - Shared incentives are important. Quality is critical.
  • Sharing - Need a Culture where people can share ideas, processes and tools.

"},{"location":"devops/#where-to-start","title":"Where to start ?","text":"

Understanding the eco system of your software is important. Identify all the environments like dev, test, prod etc. you have in your system and how the delivery happens from end to end.

  • Define continuous delivery
  • Establish proper collaboration between teams
  • Make sure the teams are on same pace
  • Identify the pain points in your system and start working on them.
"},{"location":"devops/#devops-best-practices","title":"DevOps Best Practices","text":"

These are some of the standard practices adopted in DevOps.

  • Source Code Management
  • Code Review
  • Configuration Management
  • Build Management
  • Artifact Repository Management
  • Release Management
  • Test Automation
  • Continuous Integration
  • Continuous Delivery
  • Continuous Deployment
  • Infrastructure As Code
  • Automation
  • Key Application Performance Monitoring/Indicators

Source Code Management

Source Code Management (SCM) systems helps to maintain the code base. It allows multiple developers to work on the code concurrently. It prevents them from overwriting the code and helps them to work in parallel from different locations.

Collaboration is an important concept in devOps and SCM helps us to achieve it by coordination of services across the development team. It also tracks co-authoring, collaboration, and individual contributions. It helps the developers to audit the code changes. It also allows rollbacks if required. It also enables backup and allows recovery when required.

Code Review

Code reviews allows the developer to improve the quality of code. They help us to identify the problems in advance. By reviewing the code, we can fix some of the problems like memory leaks, buffer overflow, formatting errors etc.

This process improves the collaboration across the team. Also, code defects are identified and removed before merging them into the mainline, thereby improving the quality of the code.

Configuration Management

Configuration Management is managing the configurations by identifying, verifying, and maintaining them. This is done for both software and hardware. The configuration management tools make sure that configurations are properly configured across different systems as per the requirements.

This helps to analyze the impact on the systems due to configurations. It makes sure the provisioning is done correctly on different systems like dev, QA, prod etc. It simplifies the coordination between development and operations teams.

Build Management

Build Management helps to assemble the build environment by packaging all the required components of the software application, such as the source code, dependencies, etc., together into a workable unit. Builds can be done manually, on demand, or automated.

It ensures that the software is stable and it is reusable. It improves the quality of the software and makes sure it is reliable. It also increases the efficiency.

Artifact Repository Management

An Artifact Repository Management system is used to manage the builds. It is a dedicated server which is used to store all the binaries that were outputs of successful builds.

It manages the life cycles of different artifacts. It helps you to easily share the builds across the team. It controls access to the build artifacts by access control.

Release Management

Release management is a part of the software development lifecycle which manages the release from development through deployment to support. Requests keep coming in for the addition of new features. Also, sometimes there may be a need to change existing functionality. This is when the release management cycle begins. Once the new feature or change is approved, it is designed, built, tested, reviewed, and, after acceptance, deployed to production. After this, it goes to maintenance, and even at this point there may be a need for enhancement. If that is the case, a new cycle begins again.

It helps us to track all the phases and status of deployments in different environments.

Test Automation

Manual testing takes lots of time. We can automate some of the manual tests which are repetitive, time consuming, and have defined input by test automation.

Automatic tests helps to improve the code quality, reduces the amount of time spent on testing, and improves the effectiveness of the overall testing life cycle.

Continuous Integration

Continuous integration allows the developers to continuously integrate the code they developed. Whenever a latest code change is made and committed to the source control system, the source code is rebuilt and this is then forwarded to testing.

With this, the latest code is always available, the builds are faster and the tests are quick.

Continuous Delivery

Continuous Delivery is the next step to Continuous Integration. In the integration, the code is built and tested. Now in the delivery, this is taken to staging environment. This is done in small frequencies and it makes sure the functionality of the software is stable.

It reduces the manual overhead. The code is continuously delivered and constantly reviewed.

Continuous Deployment

Continuous Deployment comes after Continuous Delivery. In the deployment stage, the code is deployed to the production environment. The entire process is automated in this stage.

This allows faster software releases, improves collaboration across teams, and enhances code quality.

Infrastructure As Code

Infrastructure as Code is defining infrastructure services as software code. They are defined as configuration files. Traditionally, in on-premise applications, these are managed by system administrators, but in the cloud, the infrastructure is maintained like any other software code.

Helps us to change the system configuration quickly. Tracking is easy and end to end testing is possible. Infrastructure availability is high.

Automation

Automation is a key part of DevOps. Without automation, DevOps is not efficient.

Automation comes into play whenever there is a repetitive task. Developers can automate infrastructure, applications, load balancers, etc.

Key Application Performance Monitoring/Indicators

DevOps is all about measuring the metrics and feedback, with continuous improvement processes. Collecting metrics and monitoring the software plays an important role. Different measures like uptime versus downtime, resolution timelines, etc. help us to understand the performance of the system.

"},{"location":"devops/#devops-in-twelve-factor-apps","title":"Devops in Twelve factor apps","text":"

If you are new to Twelve factor methodology, have a look here. For more details, checkout Cloud-Native module.

"},{"location":"devops/#devops-reference-architecture","title":"DevOps Reference Architecture","text":"
  1. Collaboration tools enable a culture of innovation. Developers, designers, operations teams, and managers must communicate constantly. Development and operations tools must be integrated to post updates and alerts as new builds are completed and deployed and as performance is monitored. The team can discuss the alerts as a group in the context of the tool.
  2. As the team brainstorms ideas, responds to feedback and metrics, and fixes defects, team members create work items and rank them in the backlog. The team works on items from the top of the backlog, delivering to production as they complete work.
  3. Developers write source code in a code editor to implement the architecture. They construct, change, and correct applications by using various coding models and tools.
  4. Developers manage the versions and configuration of assets, merge changes, and manage the integration of changes. The source control tool that a team uses should support social coding.
  5. Developers compile, package, and prepare software assets. They need tools that can assess the quality of the code that is being delivered to source control. Those assessments are done before delivery, are associated with automated build systems, and include practices such as code reviews, unit tests, code quality scans, and security scans.
  6. Binary files and other output from the build are sent to and managed in a build artifact repository.
  7. The release is scheduled. The team needs tools that support release communication and managing, preparing, and deploying releases.
  8. The team coordinates the manual and automated processes that are required for the solution to operate effectively. The team must strive towards continuous delivery with zero downtime. A/B deployments can help to gauge the effectiveness of new changes.
  9. The team must understand the application and the options for the application's runtime environment, security, management, and release requirements.
  10. Depending on the application requirements, some or all of the application stack must be considered, including middleware, the operating system, and virtual machines.
  11. The team must ensure that all aspects of the application and its supporting infrastructure are secured.
  12. The team plans, configures, monitors, defines criteria, and reports on application availability and performance. Predictive analytics can indicate problems before they occur.
  13. The right people on the team or systems are notified when issues occur.
  14. The team manages the process for responding to operations incidents, and delivers the changes to fix any incidents.
  15. The team uses analytics to learn how users interact with the application and measure success through metrics.
  16. When users interact with the application, they can provide feedback on their requirements and how the application is meeting them, which is captured by analytics as well.
  17. DevOps engineers manage the entire application lifecycle while they respond to feedback and analytics from the running application.
  18. The enterprise network is protected by a firewall and must be accessed through transformation and connectivity services and secure messaging services.
  19. The security team uses the user directory throughout the flow. The directory contains information about the user accounts for the enterprise.

For a cloud native implementation, the reference architecture will be as follows.

"},{"location":"devops/#references","title":"References","text":"
  • [Michael H\u00fcttermann (2012). DevOps for Developers. Publisher: Apress](https://learning.oreilly.com/library/view/devops-for-developers/9781430245698/)
  • [Sricharan Vadapalli (2018). DevOps: Continuous Delivery, Integration, and Deployment with DevOps. Publisher: Packt Publishing](https://learning.oreilly.com/library/view/devops-continuous-delivery/9781789132991/)
  • [DevOps Architecture](https://www.ibm.com/cloud/garage/architectures/devOpsArchitecture/0_1)
"},{"location":"devops/argocd/","title":"Continuous Deployment","text":"

Continuous Integration, Delivery, and Deployment are important DevOps practices and we often hear a lot about them. These processes are valuable and ensure that the software is kept up to date in a timely manner.

  • Continuous Integration is an automation process which allows developers to integrate their work into a repository. When a developer pushes their work into the source code repository, it ensures that the software continues to work properly. It helps to enable collaborative development across teams and also helps to identify integration bugs sooner.
  • Continuous Delivery comes after Continuous Integration. It prepares the code for release. It automates the steps that are needed to deploy a build.
  • Continuous Deployment is the final step which succeeds Continuous Delivery. It automatically deploys the code whenever a code change is done. Entire process of deployment is automated.
"},{"location":"devops/argocd/#what-is-gitops","title":"What is GitOps?","text":"

GitOps in short is a set of practices to use Git pull requests to manage infrastructure and application configurations. Git repository in GitOps is considered the only source of truth and contains the entire state of the system so that the trail of changes to the system state are visible and auditable.

  • Traceability of changes in GitOps is no novelty in itself, as this approach is almost universally employed for application source code. However, GitOps advocates applying the same principles (reviews, pull requests, tagging, etc.) to infrastructure and application configuration so that teams can benefit from the same assurance as they do for the application source code.
  • Although there is no precise definition or agreed upon set of rules, the following principles are an approximation of what constitutes a GitOps practice:
  • Declarative description of the system is stored in Git (configs, monitoring, etc)
  • Changes to the state are made via pull requests
  • The state of the running system is reconciled with the state declared in the Git repository
"},{"location":"devops/argocd/#argocd-overview","title":"ArgoCD Overview","text":""},{"location":"devops/argocd/#presentations","title":"Presentations","text":"

GitOps Overview

"},{"location":"devops/argocd/#activities","title":"Activities","text":"

These activities give you a chance to walkthrough building CD pipelines using ArgoCD.

These tasks assume that you have: - Reviewed the Continuous Deployment concept page.

Task Description Link Time Walkthroughs GitOps Introduction to GitOps with OpenShift Learn OpenShift GitOps 20 min Try It Yourself ArgoCD Lab Learn how to setup ArgoCD and Deploy Application ArgoCD 30 min

Once you have completed these tasks, you will have created an ArgoCD deployment and have an understanding of Continuous Deployment.

"},{"location":"devops/ibm-toolchain/","title":"IBM ToolChain","text":"

By following this tutorial, you create an open toolchain that includes a Tekton-based delivery pipeline. You then use the toolchain and DevOps practices to develop a simple \"Hello World\" web application (app) that you deploy to the IBM Cloud Kubernetes Service.

Tekton is an open source, vendor-neutral, Kubernetes-native framework that you can use to build, test, and deploy apps to Kubernetes. Tekton provides a set of shared components for building continuous integration and continuous delivery (CICD) systems. As an open source project, Tekton is managed by the Continuous Delivery Foundation (CDF). The goal is to modernize continuous delivery by providing industry specifications for pipelines, workflows, and other building blocks. With Tekton, you can build, test, and deploy across cloud providers or on-premises systems by abstracting the underlying implementation details. Tekton pipelines are built in to IBM Cloud\u2122 Continuous Delivery.

After you create the cluster and the toolchain, you change your app's code and push the change to the Git Repos and Issue Tracking repository (repo). When you push changes to your repo, the delivery pipeline automatically builds and deploys the code.

"},{"location":"devops/ibm-toolchain/#prerequisites","title":"Prerequisites","text":"
  1. You must have an IBM Cloud account. If you don't have one, sign up for a trial. The account requires an IBMid. If you don't have an IBMid, you can create one when you register.
  2. Verify the toolchains and tool integrations that are available in your region and IBM Cloud environment. A toolchain is a set of tool integrations that support development, deployment, and operations tasks.

  3. You need a Kubernetes cluster and an API key. You can create them by using either the UI or the CLI. You can create them from the IBM Cloud Catalog

  4. Create a container registry namespace to deploy the container we are going to build. You can create one from the Container Registry UI

  5. Create the API key by using the string that is provided for your key name.

    ibmcloud iam api-key-create my-api-key\n
    Save the API key value that is provided by the command.

"},{"location":"devops/ibm-toolchain/#create-continues-delivery-service-instance","title":"Create Continues Delivery Service Instance","text":"
  1. Open the IBM Cloud Catalog
  2. Search for delivery
  3. Click on Continuous Delivery
  4. Select the Dallas region, as the tutorial will be using the Managed Tekton Worker, which is available in Dallas only.
  5. Select a Plan
  6. Click Create
"},{"location":"devops/ibm-toolchain/#create-an-ibm-cloud-toolchain","title":"Create an IBM Cloud Toolchain","text":"

In this task, you create a toolchain and add the tools that you need for this tutorial. Before you begin, you need your API key and Kubernetes cluster name.

  1. Open the menu in the upper-left corner and click DevOps. Click ToolChains. Click Create a toolchain. Type in the search box toolchain. Click Build Your Own Toolchain.
  2. On the \"Build your own toolchain\" page, review the default information for the toolchain settings. The toolchain's name identifies it in IBM Cloud. Each toolchain is associated with a specific region and resource group. From the menus on the page, select the region Dallas since we are going to use the Beta Managed Tekton Worker, if you use Private Workers you can use any Region.
  3. Click Create. The blank toolchain is created.
  4. Click Add a Tool and click Git Repos and Issue Tracking.
    • From the Repository type list, select Clone.
    • In the Source repository URL field, type https://github.com/csantanapr/hello-tekton.git.
    • Make sure to uncheck the Make this repository private checkbox and that the Track deployment of code changes checkbox is selected.
    • Click Create Integration. Tiles for Git Issues and Git Code are added to your toolchain.
  5. Return to your toolchain's overview page.
  6. Click Add a Tool. Type pipeline in the search box and click Delivery Pipeline.
    • Type a name for your new pipeline.
    • Click Tekton.
    • Make sure that the Show apps in the View app menu checkbox is selected. All the apps that your pipeline creates are shown in the View App list on the toolchain's Overview page.
    • Click Create Integration to add the Delivery Pipeline to your toolchain.
  7. Click Delivery Pipeline to open the Tekton Delivery Pipeline dashboard. Click the Definitions tab and complete these tasks:
  8. Click Add to add your repository.
  9. Specify the Git repo and URL that contains the Tekton pipeline definition and related artifacts. From the list, select the Git repo that you created earlier.
  10. Select the branch in your Git repo that you want to use. For this tutorial, use the default value.
  11. Specify the directory path to your pipeline definition within the Git repo. You can reference a specific definition within the same repo. For this tutorial, use the default value.
  12. Click Add, then click Save
  13. Click the Worker tab and select the private worker that you want to use to run your Tekton pipeline on the associated cluster. Either select the private worker you set up in the previous steps, or select the IBM Managed workers in DALLAS option.
  14. Click Save
  15. Click the Triggers tab, click Add trigger, and click Git Repository. Associate the trigger with an event listener:
  16. From the Repository list, select your repo.
  17. Select the When a commit is pushed checkbox, and in the EventListener field, make sure that listener is selected.
  18. Click Save
  19. On the Triggers tab, click Add trigger and click Manual. Associate that trigger with an event listener:
  20. In the EventListener field, make sure that listener is selected.
  21. Click Save. Note: Manual triggers run when you click Run pipeline and select the trigger. Git repository triggers run when the specified Git event type occurs for the specified Git repo and branch. The list of available event listeners is populated with the listeners that are defined in the pipeline code repo.
  22. Click the Environment properties tab and define the environment properties for this tutorial. To add each property, click Add property and click Text property. Add these properties:
Parameter Required? Description apikey required Type the API key that you created earlier in this tutorial. cluster Optional (cluster) Type the name of the Kubernetes cluster that you created. registryNamespace required Type the IBM Image Registry namespace where the app image will be built and stored. To use an existing namespace, use the CLI and run ibmcloud cr namespace-list to identify all your current namespaces repository required Type the source Git repository where your resources are stored. This value is the URL of the Git repository that you created earlier in this tutorial. To find your repo URL, return to your toolchain and click the Git tile. When the repository is shown, copy the URL. revision Optional (master) The Git branch clusterRegion Optional (us-south) Type the region where your cluster is located. clusterNamespace Optional (prod) The namespace in your cluster where the app will be deployed. registryRegion Optional (us-south) The region where your Image registry is located. To find your registry region, use the CLI and run ibmcloud cr region.

12. Click Save

"},{"location":"devops/ibm-toolchain/#explore-the-pipeline","title":"Explore the pipeline","text":"

With a Tekton-based delivery pipeline, you can automate the continuous building, testing, and deployment of your apps.

The Tekton Delivery Pipeline dashboard displays an empty table until at least one Tekton pipeline runs. After a Tekton pipeline runs, either manually or as the result of external Git events, the table lists the run, its status, and the last updated time of the run definition.

To run the manual trigger that you set up in the previous task, click Run pipeline and select the name of the manual trigger that you created. The pipeline starts to run and you can see the progress on the dashboard. Pipeline runs can be in any of the following states:

  • Pending: The PipelineRun definition is queued and waiting to run.
  • Running: The PipelineRun definition is running in the cluster.
  • Succeeded: The PipelineRun definition was successfully completed in the cluster.
  • Failed: The PipelineRun definition run failed. Review the log file for the run to determine the cause.

  • For more information about a selected run, click any row in the table. You view the Task definition and the steps in each PipelineRun definition. You can also view the status, logs, and details of each Task definition and step, and the overall status of the PipelineRun definition.

  • The pipeline definition is stored in the pipeline.yaml file in the .tekton folder of your Git repository. Each task has a separate section of this file. The steps for each task are defined in the tasks.yaml file.

  • Review the pipeline-build-task. The task consists of a git clone of the repository followed by two steps:

    • pre-build-check: This step checks for the mandatory Dockerfile and runs a lint tool. It then checks the registry current plan and quota before it creates the image registry namespace if needed.
    • build-docker-image: This step creates the Docker image by using the IBM Cloud Container Registry build service through the ibmcloud cr build CLI script.
  • Review the pipeline-validate-task. The task consists of a git clone of the repository, followed by the check-vulnerabilities step. This step runs the IBM Cloud Vulnerability Advisor on the image to check for known vulnerabilities. If it finds a vulnerability, the job fails, preventing the image from being deployed. This safety feature prevents apps with security holes from being deployed. The image has no vulnerabilities, so it passes. In this tutorial template, the default configuration of the job is to not block on failure.
  • Review the pipeline-deploy-task. The task consists of a git clone of the repository followed by two steps:
    • pre-deploy-check: This step checks whether the IBM Container Service cluster is ready and has a namespace that is configured with access to the private image registry by using an IBM Cloud API Key.
    • deploy-to-kubernetes: This step updates the deployment.yml manifest file with the image url and deploys the application using kubectl apply
  • After all the steps in the pipeline are completed, a green status is shown for each task. Click the deploy-to-kubernetes step and click the Logs tab to see the successful completion of this step.
  • Scroll to the end of the log. The DEPLOYMENT SUCCEEDED message is shown at the end of the log.
  • Click the URL to see the running application.
"},{"location":"devops/ibm-toolchain/#modify-the-app-code","title":"Modify the App Code","text":"

In this task, you modify the application and redeploy it. You can see how your Tekton-based delivery pipeline automatically picks up the changes in the application on commit and redeploys the app.

  1. On the toolchain's Overview page, click the Git tile for your application.
    • Tip: You can also use the built-in Eclipse Orion-based Web IDE, a local IDE, or your favorite editor to change the files in your repo.
  2. In the repository directory tree, open the app.js file.
  3. Edit the text message code to change the welcome message.
  4. Commit the updated file by typing a commit message and clicking Commit changes to push the change to the project's remote repository.
  5. Return to the toolchain's Overview page by clicking the back arrow.
  6. Click Delivery Pipeline. The pipeline is running because the commit automatically started a build. Over the next few minutes, watch your change as it is built, tested, and deployed.
  7. After the deploy-to-kubernetes step is completed, refresh your application URL. The updated message is shown.
"},{"location":"devops/ibm-toolchain/#clean-up-resources","title":"Clean up Resources","text":"

In this task, you can remove any of the content that is generated by this tutorial. Before you begin, you need the IBM Cloud CLI and the IBM Cloud Kubernetes Service CLI. Instructions to install the CLI are in the prerequisite section of this tutorial.

  1. Delete the Git repository: sign in to Git and select personal projects. Then go to the repository's General settings and remove the repository.
  2. Delete the toolchain. You can delete a toolchain and specify which of the associated tool integrations you want to delete. When you delete a toolchain, the deletion is permanent.
    • On the DevOps dashboard, on the Toolchains page, click the toolchain to delete. Alternatively, on the app's Overview page, on the Continuous delivery card, click View Toolchain.
    • Click the More Actions menu, which is next to View app.
    • Click Delete. Deleting a toolchain removes all of its tool integrations, which might delete resources that are managed by those integrations.
    • Confirm the deletion by typing the name of the toolchain and clicking Delete.
    • Tip: When you delete a GitHub, GitHub Enterprise, or Git Repos and Issue Tracking tool integration, the associated repo isn't deleted from GitHub, GitHub Enterprise, or Git Repos and Issue Tracking. You must manually remove the repo.
  3. Delete the cluster or discard the namespace from it. It is easiest to delete the entire namespace (Please do not delete the default namespace) by using the IBM Cloud\u2122 Kubernetes Service CLI from a command-line window. However, if you have other resources that you need to keep in the namespace, you need to delete the application resources individually instead of the entire namespace. To delete the entire namespace, enter this command:
    kubectl delete namespace [not-the-default-namespace]\n
  4. Delete your IBM Cloud API key.
  5. From the Manage menu, click Access (IAM). Click IBM Cloud API Keys.
  6. Find your API Key in the list and select Delete from the menu to the right of the API Key name.
  7. Delete the container images. To delete the images in your container image registry, enter this command in a command-line window:
    ibmcloud cr image-rm IMAGE [IMAGE...]\n
    If you created a registry namespace for the tutorial, delete the entire registry namespace by entering this command:
    ibmcloud cr namespace-rm NAMESPACE\n
    • Note: You can run this tutorial many times by using the same registry namespace and cluster parameters without discarding previously generated resources. The generated resources use randomized names to avoid conflicts.
"},{"location":"devops/ibm-toolchain/#summary","title":"Summary","text":"

You created a toolchain with a Tekton-based delivery pipeline that deploys a \"Hello World\" app to a secure container in a Kubernetes cluster. You changed a message in the app and tested your change. When you pushed the change to the repo, the delivery pipeline automatically redeployed the app.

  • Read more about the IBM Cloud Kubernetes Service
  • Read more about Tekton
  • Explore the DevOps reference architecture.
"},{"location":"devops/tekton/","title":"Continuous Integration","text":"

Continuous Integration, Delivery, and Deployment are important DevOps practices and we often hear a lot about them. These processes are valuable and ensure that the software is kept up to date in a timely manner.

  • Continuous Integration is an automation process which allows developers to integrate their work into a repository. When a developer pushes their work into the source code repository, it ensures that the software continues to work properly. It helps to enable collaborative development across teams and also helps to identify integration bugs sooner.
  • Continuous Delivery comes after Continuous Integration. It prepares the code for release. It automates the steps that are needed to deploy a build.
  • Continuous Deployment is the final step which succeeds Continuous Delivery. It automatically deploys the code whenever a code change is done. Entire process of deployment is automated.
"},{"location":"devops/tekton/#tekton-overview","title":"Tekton Overview","text":"

Tekton is a cloud-native solution for building CI/CD systems. It consists of Tekton Pipelines, which provides the building blocks, and of supporting components, such as Tekton CLI and Tekton Catalog, that make Tekton a complete ecosystem.

"},{"location":"devops/tekton/#presentations","title":"Presentations","text":"

Tekton Overview IBM Cloud DevOps with Tekton

"},{"location":"devops/tekton/#activities","title":"Activities","text":"

The continuous integration activities focus around Tekton the integration platform. These labs will show you how to build pipelines and test your code before deployment.

These tasks assume that you have:

  • Reviewed the continuous integration concept page.
  • Installed Tekton into your cluster.
Task Description Link Time Walkthroughs Deploying Applications From Source Using OpenShift 4 S2I 30 min Try It Yourself Tekton Lab Using Tekton to build container images Tekton 1 hour IBM Cloud DevOps Using IBM Cloud ToolChain with Tekton Tekton on IBM Cloud 1 hour Jenkins Lab Using Jenkins to build and deploy applications. Jenkins 1 hour

Once you have completed these tasks, you will have an understanding of continuous integration and how to use Tekton to build a pipeline.

"},{"location":"labs/","title":"Activities","text":""},{"location":"labs/#containers","title":"Containers","text":"Task Description Link Try It Yourself IBM Container Registry Build and Deploy Run using IBM Container Registry IBM Container Registry Docker Lab Running a Sample Application on Docker Docker Lab"},{"location":"labs/#kubernetes","title":"Kubernetes","text":"Task Description Link Try It Yourself Pod Creation Challenge yourself to create a Pod YAML file to meet certain parameters. Pod Creation Pod Configuration Configure a pod to meet compute resource requirements. Pod Configuration Multiple Containers Build a container using legacy container image. Multiple Containers Probes Create some Health & Startup Probes to find what's causing an issue. Probes Debugging Find which service is breaking in your cluster and find out why. Debugging Rolling Updates Lab Create a Rolling Update for your application. Rolling Updates Cron Jobs Lab Using Tekton to test new versions of applications. Crons Jobs Creating Services Create two services with certain requirements. Setting up Services Setting up Persistent Volumes Create a Persistent Volume that's accessible from a SQL Pod. Setting up Persistent Volumes IKS Ingress Controller Configure Ingress on Free IKS Cluster Setting IKS Ingress Solutions Lab Solutions Solutions for the Kubernetes Labs Solutions"},{"location":"labs/#continuous-integration","title":"Continuous Integration","text":"Task Description Link Walkthroughs Deploying Applications From Source Using OpenShift 4 S2I Try It Yourself Tekton Lab Using Tekton to test new versions of applications. Tekton IBM Cloud DevOps Using IBM Cloud ToolChain with Tekton Tekton on IBM Cloud Jenkins Lab Using Jenkins to test new versions of applications. 
Jenkins"},{"location":"labs/#continuous-deployment","title":"Continuous Deployment","text":"Task Description Link Walkthroughs GitOps Introduction to GitOps with OpenShift Learn OpenShift GitOps Multi-cluster Multi-cluster GitOps with OpenShift Learn OpenShift Try It Yourself ArgoCD Lab Learn how to setup ArgoCD and Deploy Application ArgoCD"},{"location":"labs/#projects","title":"Projects","text":"Task Description Link Try It Yourself Cloud Native Challenge Deploy your own app using what we have learned CN Challenge"},{"location":"labs/containers/","title":"Docker Lab","text":""},{"location":"labs/containers/#introduction","title":"Introduction","text":"

In this lab, you will learn about how to use docker and how to run applications using docker. This lab will not explicitly give you the commands to progress through these exercises, but will show you a similar expected output.

It's your goal to create the commands needed (shown as < command > at each step) to complete the lab.

"},{"location":"labs/containers/#prerequisites","title":"Prerequisites","text":"
  • Create a Quay account. This account is needed to push images to a container registry. Follow the tutorial to get familiar with interacting with Quay
  • You need to install Docker in your environment. Follow the instructions here to install it on Mac and here to install it on Windows.
"},{"location":"labs/containers/#working-with-docker","title":"Working with docker","text":"

Before proceeding, make sure docker is properly installed on your system.

  1. Please verify your Docker by looking up the version.

If it is installed, you will see a version number something similar to below.

$ <command>\nDocker version 19.03.0-beta3, build c55e026\n

** Running a hello-world container **

Let us start with a hello-world container.

  1. run a hello-world container.

If it is successfully run, you will see something like below.

$ <command>\nUnable to find image 'hello-world:latest' locally\nlatest: Pulling from library/hello-world\n1b930d010525: Pull complete\nDigest: sha256:41a65640635299bab090f783209c1e3a3f11934cf7756b09cb2f1e02147c6ed8\nStatus: Downloaded newer image for hello-world:latest\n\nHello from Docker!\nThis message shows that your installation appears to be working correctly.\n\nTo generate this message, Docker took the following steps:\n 1. The Docker client contacted the Docker daemon.\n 2. The Docker daemon pulled the \"hello-world\" image from the Docker Hub.\n    (amd64)\n 3. The Docker daemon created a new container from that image which runs the\n    executable that produces the output you are currently reading.\n 4. The Docker daemon streamed that output to the Docker client, which sent it\n    to your terminal.\n\nTo try something more ambitious, you can run an Ubuntu container with:\n $ docker run -it ubuntu bash\n\nShare images, automate workflows, and more with a free Docker ID:\n https://hub.docker.com/\n\nFor more examples and ideas, visit:\n https://docs.docker.com/get-started/\n

Since the hello-world image does not exist locally, it is pulled from library/hello-world. But if it already exists locally, Docker will not pull it every time, but rather use the existing one.

This image is pulled from https://hub.docker.com/_/hello-world. Docker hub is a repository used to store docker images. Similarly, you can use your own registries to store images. For example, IBM Cloud provides you a container registry.

Verifying the hello-world image

  1. Now verify whether an image exists locally on your system.

You will then see something like below.

$ <command>\nREPOSITORY          TAG                 IMAGE ID            CREATED             SIZE\nhello-world         latest              fce289e99eb9        5 months ago        1.84kB\n
"},{"location":"labs/containers/#get-the-sample-application","title":"Get the sample application","text":"

To get the sample application, you will need to clone it from github.

# Clone the sample app\ngit clone https://github.com/ibm-cloud-architecture/cloudnative_sample_app.git\n\n# Go to the project's root folder\ncd cloudnative_sample_app/\n
"},{"location":"labs/containers/#run-the-application-on-docker","title":"Run the application on Docker","text":""},{"location":"labs/containers/#build-the-docker-image","title":"Build the docker image","text":"

Let's take a look at the Dockerfile before building it.

FROM maven:3.3-jdk-8 as builder\n\nCOPY . .\nRUN mvn clean install\n\nFROM openliberty/open-liberty:springBoot2-ubi-min as staging\n\nCOPY --chown=1001:0 --from=builder /target/cloudnativesampleapp-1.0-SNAPSHOT.jar /config/app.jar\nRUN springBootUtility thin \\\n    --sourceAppPath=/config/app.jar \\\n    --targetThinAppPath=/config/dropins/spring/thinClinic.jar \\\n    --targetLibCachePath=/opt/ol/wlp/usr/shared/resources/lib.index.cache\n
  • Using the FROM instruction, we provide the name and tag of an image that should be used as our base. This must always be the first instruction in the Dockerfile.
  • Using COPY instruction, we copy new contents from the source filesystem to the container filesystem.
  • RUN instruction executes the commands.

This Dockerfile leverages multi-stage builds, which lets you create multiple stages in your Dockerfile to do certain tasks.

In our case, we have two stages.

  • The first one uses maven:3.3-jdk-8 as its base image to download and build the project and its dependencies using Maven.
  • The second stage uses openliberty/open-liberty:springBoot2-ubi-min as its base image to run the compiled code from the previous stage.

The advantage of using the multi-stage builds approach is that the resulting image only uses the base image of the last stage. Meaning that in our case, we will only end up with the openliberty/open-liberty:springBoot2-ubi-min as our base image, which is much tinier than having an image that has both Maven and the JRE.

By using the multi-stage builds approach when it makes sense to use it, you will end up with much lighter and easier to maintain images, which can save you space on your Docker Registry. Also, having tinier images usually means less resource consumption on your worker nodes, which can result in cost savings.

Once you have the Dockerfile ready, the next step is to build it. The build command allows you to build a docker image which you can later run as a container.

  1. Build the docker file with the image_name of greeting and give it a image_tag of v1.0.0 and build it using the current context.

You will see something like below:

$ <command>\nSending build context to Docker daemon  22.17MB\nStep 1/6 : FROM maven:3.3-jdk-8 as builder\n ---> 9997d8483b2f\nStep 2/6 : COPY . .\n ---> c198e3e54023\nStep 3/6 : RUN mvn clean install\n ---> Running in 24378df7f87b\n[INFO] Scanning for projects...\n.\n.\n.\n[INFO] Installing /target/cloudnativesampleapp-1.0-SNAPSHOT.jar to /root/.m2/repository/projects/cloudnativesampleapp/1.0-SNAPSHOT/cloudnativesampleapp-1.0-SNAPSHOT.jar\n[INFO] Installing /pom.xml to /root/.m2/repository/projects/cloudnativesampleapp/1.0-SNAPSHOT/cloudnativesampleapp-1.0-SNAPSHOT.pom\n[INFO] ------------------------------------------------------------------------\n[INFO] BUILD SUCCESS\n[INFO] ------------------------------------------------------------------------\n[INFO] Total time: 44.619 s\n[INFO] Finished at: 2020-04-06T16:07:04+00:00\n[INFO] Final Memory: 38M/385M\n[INFO] ------------------------------------------------------------------------\nRemoving intermediate container 24378df7f87b\n ---> cc5620334e1b\nStep 4/6 : FROM openliberty/open-liberty:springBoot2-ubi-min as staging\n ---> 021530b0b3cb\nStep 5/6 : COPY --chown=1001:0 --from=builder /target/cloudnativesampleapp-1.0-SNAPSHOT.jar /config/app.jar\n ---> dbc81e5f4691\nStep 6/6 : RUN springBootUtility thin     --sourceAppPath=/config/app.jar     --targetThinAppPath=/config/dropins/spring/thinClinic.jar     --targetLibCachePath=/opt/ol/wlp/usr/shared/resources/lib.index.cache\n ---> Running in 8ea80b5863cb\nCreating a thin application from: /config/app.jar\nLibrary cache: /opt/ol/wlp/usr/shared/resources/lib.index.cache\nThin application: /config/dropins/spring/thinClinic.jar\nRemoving intermediate container 8ea80b5863cb\n ---> a935a129dcb2\nSuccessfully built a935a129dcb2\nSuccessfully tagged greeting:v1.0.0\n
  1. Next, verify your newly built image

The output will be as follows.

$ <command>\nREPOSITORY                           TAG                   IMAGE ID            CREATED             SIZE\ngreeting                             v1.0.0                89bd7032fdee        51 seconds ago      537MB\nopenliberty/open-liberty             springBoot2-ubi-min   bcfcb2c5ce16        6 days ago          480MB\nhello-world                          latest                f9cad508cb4c        5 months ago        1.84kB\n
"},{"location":"labs/containers/#run-the-docker-container","title":"Run the docker container","text":"

Now let's try running the docker container. Run it with the following parameters:

  1. Expose port 9080. Run it in the background in detached mode. Give the container the name of greeting.

Once done, you will have something like below.

$ <command>\nbc2dc95a6bd1f51a226b291999da9031f4443096c1462cb3fead3df36613b753\n

Also, docker cannot create two containers with the same name. If you try to run the same container having the same name again, you will see something like below.

$ <command>\ndocker: Error response from daemon: Conflict. The container name \"/greeting\" is already in use by container \"a74b91789b29af6e7be92b30d0e68eef852bfb24336a44ef1485bb58becbd664\". You have to remove (or rename) that container to be able to reuse that name.\nSee 'docker run --help'.\n

It is a good practice to name your containers. Naming helps you to discover your service easily.

  1. List all the running containers.

You will see something like below.

$ <command>\nCONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                              NAMES\nbc2dc95a6bd1        greeting:v1.0.0     \"/opt/ol/helpers/run\u2026\"   18 minutes ago      Up 18 minutes       0.0.0.0:9080->9080/tcp, 9443/tcp   greeting\n
  1. Let's inspect the running container.

By inspecting the container, you can access detailed information about the container. By using this command, you get to know the details about network settings, volumes, configs, state etc.

If we consider our container, it is as follows. You can see a lot of information about the greeting container.

$ <command>\n[\n    {\n        \"Id\": \"bc2dc95a6bd1f51a226b291999da9031f4443096c1462cb3fead3df36613b753\",\n        \"Created\": \"2019-08-30T16:56:40.2081539Z\",\n        \"Path\": \"/opt/ol/helpers/runtime/docker-server.sh\",\n        \"Args\": [\n            \"/opt/ol/wlp/bin/server\",\n            \"run\",\n            \"defaultServer\"\n        ],\n        \"State\": {\n            \"Status\": \"running\",\n            \"Running\": true,\n            \"Paused\": false,\n            \"Restarting\": false,\n            \"OOMKilled\": false,\n            \"Dead\": false,\n            \"Pid\": 27548,\n            \"ExitCode\": 0,\n            \"Error\": \"\",\n            \"StartedAt\": \"2019-08-30T16:56:41.0927889Z\",\n            \"FinishedAt\": \"0001-01-01T00:00:00Z\"\n        },\n        ..........\n        ..........\n        ..........\n    }\n]\n
  1. Get the logs of the greeting container.

It helps you to access the logs of your container. It allows you to debug the container if it fails. It also lets you know what is happening with your application.

At the end, you will see something like below.

.   ____          _            __ _ _\n/\\\\ / ___'_ __ _ _(_)_ __  __ _ \\ \\ \\ \\\n( ( )\\___ | '_ | '_| | '_ \\/ _` | \\ \\ \\ \\\n\\\\/  ___)| |_)| | | | | || (_| |  ) ) ) )\n'  |____| .__|_| |_|_| |_\\__, | / / / /\n=========|_|==============|___/=/_/_/_/\n:: Spring Boot ::        (v2.1.7.RELEASE)\n2019-08-30 16:57:01.494  INFO 1 --- [ecutor-thread-5] application.SBApplication                : Starting SBApplication on bc2dc95a6bd1 with PID 1 (/opt/ol/wlp/usr/servers/defaultServer/dropins/spring/thinClinic.jar started by default in /opt/ol/wlp/output/defaultServer)\n2019-08-30 16:57:01.601  INFO 1 --- [ecutor-thread-5] application.SBApplication                : No active profile set, falling back to default profiles: default\n[AUDIT   ] CWWKT0016I: Web application available (default_host): http://bc2dc95a6bd1:9080/\n2019-08-30 16:57:09.641  INFO 1 --- [cutor-thread-25] o.s.web.context.ContextLoader            : Root WebApplicationContext: initialization completed in 7672 ms\n2019-08-30 16:57:12.279  INFO 1 --- [ecutor-thread-5] o.s.b.a.e.web.EndpointLinksResolver      : Exposing 15 endpoint(s) beneath base path '/actuator'\n2019-08-30 16:57:12.974  INFO 1 --- [ecutor-thread-5] o.s.s.concurrent.ThreadPoolTaskExecutor  : Initializing ExecutorService 'applicationTaskExecutor'\n2019-08-30 16:57:13.860  INFO 1 --- [ecutor-thread-5] d.s.w.p.DocumentationPluginsBootstrapper : Context refreshed\n2019-08-30 16:57:13.961  INFO 1 --- [ecutor-thread-5] d.s.w.p.DocumentationPluginsBootstrapper : Found 1 custom documentation plugin(s)\n2019-08-30 16:57:14.020  INFO 1 --- [ecutor-thread-5] s.d.s.w.s.ApiListingReferenceScanner     : Scanning for api listing references\n2019-08-30 16:57:14.504  INFO 1 --- [ecutor-thread-5] application.SBApplication                : Started SBApplication in 17.584 seconds (JVM running for 33.368)\n[AUDIT   ] CWWKZ0001I: Application thinClinic started in 21.090 seconds.\n[AUDIT   ] CWWKF0012I: The server installed the following features: 
[el-3.0, jsp-2.3, servlet-4.0, springBoot-2.0, ssl-1.0, transportSecurity-1.0, websocket-1.1].\n[AUDIT   ] CWWKF0011I: The defaultServer server is ready to run a smarter planet. The defaultServer server started in 33.103 seconds.\n

This shows that the Spring Boot application is successfully started.

"},{"location":"labs/containers/#access-the-application","title":"Access the application","text":"
  • To access the application, open the browser and access http://localhost:9080/greeting?name=John.

You will see something like below.

{\"id\":2,\"content\":\"Welcome to Cloudnative bootcamp !!! Hello, John :)\"}\n

Container Image Registry

Container Image Registry is a place where you can store the container images. They can be public or private registries. They can be hosted by third party as well. In this lab, we are using Quay.

"},{"location":"labs/containers/#pushing-an-image-to-a-registry","title":"Pushing an image to a Registry","text":"

Let us now push the image to the Quay registry. Before pushing the image to the registry, one needs to login.

  1. Login to Quay using your credentials.

Once logged in, we need to tag the image for the registry.

  1. Tag your image for the image registry using the same name and tag from before. Be sure to include the host name of the target image registry in the destination tag (e.g. quay.io). NOTE: the tag command has both the source tag and repository destination tag in it.

  2. Now push the image to the registry. This allows you to share images to a registry.

If everything goes fine, you will see something like below.

$ <command>\nThe push refers to repository [quay.io/<repository_name>/greeting]\n2e4d09cd03a2: Pushed\nd862b7819235: Pushed\na9212239031e: Pushed\n4be784548734: Pushed\na43c287826a1: Mounted from library/ibmjava\ne936f9f1df3e: Mounted from library/ibmjava\n92d3f22d44f3: Mounted from library/ibmjava\n10e46f329a25: Mounted from library/ibmjava\n24ab7de5faec: Mounted from library/ibmjava\n1ea5a27b0484: Mounted from library/ibmjava\nv1.0.0: digest: sha256:21c2034646a31a18b053546df00d9ce2e0871bafcdf764f872a318a54562e6b4 size: 2415\n

Once the push is successful, your image will be residing in the registry.

"},{"location":"labs/containers/#clean-up","title":"Clean Up","text":"
  1. Stop the greeting container.

  2. Remove the container.

  3. Remove the image. (NOTE: You will need the image_id to remove it.)

"},{"location":"labs/containers/#pulling-an-image-from-the-registry","title":"Pulling an image from the registry","text":"

Sometimes, you may need the images that are residing on your registry. Or you may want to use some public images out there. Then, we need to pull the image from the registry.

  1. Pull the image greeting from the registry.

If it successfully got pulled, we will see something like below.

ddcb5f219ce2: Pull complete\ne3371bbd24a0: Pull complete\n49d2efb3c01b: Pull complete\nDigest: sha256:21c2034646a31a18b053546df00d9ce2e0871bafcdf764f872a318a54562e6b4\nStatus: Downloaded newer image for <repository_name>/greeting:v1.0.0\ndocker.io/<repository_name>/greeting:v1.0.0\n
"},{"location":"labs/containers/#conclusion","title":"Conclusion","text":"

You have successfully completed this lab! Let's take a look at what you learned and did today:

  • Learned about Dockerfile.
  • Learned about docker images.
  • Learned about docker containers.
  • Learned about multi-stage docker builds.
  • Ran the Greetings service on Docker.

Congratulations !!!

"},{"location":"labs/containers/container-registry/","title":"IBM Container Registries","text":"

In this lab we are going to create a Container Image and store it in the IBM Cloud Container Registry

"},{"location":"labs/containers/container-registry/#prerequisites","title":"Prerequisites","text":"
  • IBM Cloud Account
"},{"location":"labs/containers/container-registry/#login-into-ibm-cloud","title":"Login into IBM Cloud","text":""},{"location":"labs/containers/container-registry/#using-the-ibm-cloud-shell","title":"Using the IBM Cloud Shell","text":"
  1. Login into IBM Cloud
  2. Select correct account from top right drop down if your IBM id is associated with multiple accounts
  3. Click the IBM Cloud Shell Icon on the top right corner of the IBM Cloud Console
  4. This opens a new browser window with a Linux command-line terminal prompt.
"},{"location":"labs/containers/container-registry/#create-a-new-container-registry-namespace","title":"Create a new Container Registry namespace","text":"
  1. Ensure that you're targeting the correct IBM Cloud Container Registry region. For example for Dallas region use us-south
    ibmcloud cr region-set us-south\n
  2. Choose a name for your first namespace, and create that namespace. Use this namespace for the rest of the Quick Start. Create a new Container Registry namespace. This namespace is different from a Kubernetes/OpenShift namespace. The name needs to be all lowercase and globally unique within a region.
    ibmcloud cr namespace-add <my_namespace>\n
    Now set the environment variable NAMESPACE to be used for the rest of the lab
    export NAMESPACE=<my_namespace>\n
"},{"location":"labs/containers/container-registry/#building-and-pushing-a-container-image","title":"Building and Pushing a Container Image","text":"
  1. Clone the following git repository and change directory to 1-containers
    git clone --depth 1 https://github.com/csantanapr/think2020-nodejs.git my-app\ncd my-app/1-containers/\n
  2. Inspect the Dockerfile. It contains a multi-stage build: the first stage builds the application, and the second copies only the built files.
    cat Dockerfile\n
    FROM registry.access.redhat.com/ubi8/nodejs-12 as base\n\nFROM base as builder\n\nWORKDIR /opt/app-root/src\n\nCOPY package*.json ./\n\nRUN npm ci\n\nCOPY public public \nCOPY src src \n\nRUN npm run build\n\nFROM base\n\nWORKDIR /opt/app-root/src\n\nCOPY --from=builder  /opt/app-root/src/build build\n\nCOPY package*.json ./\n\nRUN npm ci --only=production\n\nCOPY --chown=1001:0 server server\nRUN chmod -R g=u server\n\nENV PORT=8080\n\nLABEL com.example.source=\"https://github.com/csantanapr/think2020-nodejs\"\nLABEL com.example.version=\"1.0\"\n\nARG ENV=production\nENV NODE_ENV $ENV\nENV NODE_VERSION $NODEJS_VERSION\nCMD npm run $NODE_ENV\n
  3. Build and push the image, if not already set replace $NAMESPACE with the namespace you added previously, replace us.icr.io if using a different region.
    ibmcloud cr build --tag us.icr.io/$NAMESPACE/my-app:1.0 ./\n
"},{"location":"labs/containers/container-registry/#explore-the-container-registry-on-the-ibm-cloud-console","title":"Explore the Container Registry on the IBM Cloud Console","text":"
  1. Explore the container image details using the IBM Cloud Console. Go to the Main Menu->Kubernetes->Registry. You can use the tabs Namespaces, Repository, Images
"},{"location":"labs/containers/container-registry/#extra-credit-run-imge-on-kubernetes","title":"Extra Credit (Run Image on Kubernetes)","text":"

If you have a Kubernetes Cluster you can run your application image

  1. Get the Access token for your Kubernetes cluster, command assumes your cluster name is mycluster
    ibmcloud ks cluster config -c mycluster\n
  2. Run the following commands to create a deployment using the image we just built. If not already set, replace $NAMESPACE with the IBM Container Registry namespace where we stored the image.
    kubectl create deployment my-app --image us.icr.io/$NAMESPACE/my-app:1.0\nkubectl rollout status deployment/my-app\nkubectl port-forward deployment/my-app 8080:8080\n
    If the app is connected you should see the following output
    Forwarding from 127.0.0.1:8080 -> 8080\nForwarding from [::1]:8080 -> 8080\n
  3. Open a new Session and run the following command
    curl localhost:8080 -I\n
    You should see in the first line of output the following
    HTTP/1.1 200 OK\n
  4. To access the app using a browser use the IBM Cloud Shell Web Preview. Click the Web Preview Icon and select port 8080 from the drop down. The application will open in a new browser window.

  5. To stop the application, quit the kubectl port-forward command running in the terminal by pressing Ctrl+C in Session 1.

"},{"location":"labs/containers/container-registry/#delete-deployment-and-image","title":"Delete Deployment and Image","text":"
  1. Delete the app deployment
    kubectl delete deployment my-app\n
  2. Delete the container image, if not already set replace $NAMESPACE with the registry namespace
    ibmcloud cr image-rm us.icr.io/$NAMESPACE/my-app:1.0\n
"},{"location":"labs/devops/argocd/","title":"ArgoCD Lab","text":"OpenShiftKubernetes"},{"location":"labs/devops/argocd/#openshift","title":"OpenShift","text":""},{"location":"labs/devops/argocd/#pre-requisites","title":"Pre-requisites","text":"

Make sure your environment is setup properly for the lab.

Check the Environment Setup page for your setup.

"},{"location":"labs/devops/argocd/#argocd-installation","title":"ArgoCD Installation","text":"
  • Create the namespace argocd to install argocd
    oc new-project argocd\n
  • Install ArgoCD as follows.
    oc apply --filename https://raw.githubusercontent.com/ibm-cloud-architecture/learning-cloudnative-101/master/static/yamls/argo-lab/argocd-operator.yaml\n
  • When installing the tutorial, make sure you wait until the argocd-operator is finished before installing the argocd-cr, or it will fail. You can do this:
    oc get ClusterServiceVersion -n argocd\nNAME                                   DISPLAY                        VERSION   REPLACES   PHASE\nargocd-operator.v0.0.8                 Argo CD                        0.0.8                Succeeded\n
    and wait for the \"succeeded\" to come up before proceeding.
    oc apply --filename https://raw.githubusercontent.com/ibm-cloud-architecture/learning-cloudnative-101/master/static/yamls/argo-lab/argocd-cr.yaml\n
    and wait for the argocd server Pod to be running
    oc get pods -n argocd -l app.kubernetes.io/name=example-argocd-server\n
    NAME                                     READY   STATUS    RESTARTS   AGE\nexample-argocd-server-57c4fd5c45-zf4q6   1/1     Running   0          115s\n
  • Install the argocd CLI, for example on OSX use brew
    brew tap argoproj/tap\nbrew install argoproj/tap/argocd\n
  • Set an environment variable ARGOCD_URL using the EXTERNAL-IP
    export ARGOCD_NAMESPACE=\"argocd\"\nexport ARGOCD_SERVER=$(oc get route example-argocd-server -n $ARGOCD_NAMESPACE -o jsonpath='{.spec.host}')\nexport ARGOCD_URL=\"https://$ARGOCD_SERVER\"\necho ARGOCD_URL=$ARGOCD_URL\necho ARGOCD_SERVER=$ARGOCD_SERVER\n
"},{"location":"labs/devops/argocd/#deploying-the-app","title":"Deploying the app","text":"
  • Login into the UI.
    open $ARGOCD_URL\n
  • Use admin as the username and get the password with the following command
    oc get secret example-argocd-cluster -n $ARGOCD_NAMESPACE -o jsonpath='{.data.admin\\.password}' | base64 -d\n
    For example the output is similar to this:
    tyafMb7BNvO0kP9eizx3CojrK8pYJFQq\n
  • Now go back to the ArgoCD home and click on NEW APP.
  • Add the below details:
  • Application Name: sample
  • Project - default
  • SYNC POLICY: Manual
  • REPO URL: https://github.com/ibm-cloud-architecture/cloudnative_sample_app_deploy
  • Revision: HEAD
  • Path: openshift
  • Cluster - Select the default one https://kubernetes.default.svc to deploy in-cluster
  • Namespace - default
  • Click Create to finish
  • You will now see the available apps.
  • Initially, the app will be out of sync. It is yet to be deployed. You need to sync it for deploying.

To sync the application, click SYNC and then SYNCHRONIZE.

  • Wait till the app is deployed.

  • Once the app is deployed, click on it to see the details.

"},{"location":"labs/devops/argocd/#verifying-the-deployment","title":"Verifying the deployment","text":"
  • Access the app to verify if it is correctly deployed.
  • List the cloudnativesampleapp-service route
    oc get route\n
    It should have an IP under EXTERNAL-IP column
    NAME                 HOST/PORT                                     PATH   SERVICES                       PORT   TERMINATION   WILDCARD\ncloudnative-sample   cloudnative-sample-default.apps-crc.testing          cloudnativesampleapp-service   9080                 None\n
  • Set an environment variable APP_URL using the EXTERNAL-IP
    export APP_URL=\"http://$(oc get route cloudnative-sample -o jsonpath='{.status.ingress[0].host}')\"\necho ARGOCD_SERVER=$APP_URL\n
  • Access the url using curl
    curl \"$APP_URL/greeting?name=Carlos\"\n
    {\"id\":2,\"content\":\"Welcome to Cloudnative bootcamp !!! Hello, Carlos :)\"}\n
"},{"location":"labs/devops/argocd/#using-the-argocd-cli","title":"Using the ArgoCD CLI","text":"
  • Login using the cli.
  • Use admin as the username and get the password with the following command
    export ARGOCD_PASSWORD=$(oc get secret example-argocd-cluster -n $ARGOCD_NAMESPACE -o jsonpath='{.data.admin\\.password}' | base64 -d)\necho $ARGOCD_PASSWORD\n
  • Now login as follows.
    argocd login --username admin --password $ARGOCD_PASSWORD $ARGOCD_SERVER\n
    WARNING: server certificate had error: x509: cannot validate certificate for 10.97.240.99 because it doesn't contain \nany IP SANs. Proceed insecurely (y/n)? y\n\n'admin' logged in successfully\nContext 'example-argocd-server-argocd.apps-crc.testing' updated\n
  • List the applications
    argocd app list\n
    NAME    CLUSTER                         NAMESPACE  PROJECT  STATUS  HEALTH   SYNCPOLICY  CONDITIONS  REPO                                                                     PATH   TARGET\nsample  https://kubernetes.default.svc  default    default  Synced  Healthy  <none>      <none>      https://github.com/ibm-cloud-architecture/cloudnative_sample_app_deploy  openshift  HEAD\n
  • Get application details
    argocd app get sample\n
    Name:               sample\nProject:            default\nServer:             https://kubernetes.default.svc\nNamespace:          default\nURL:                https://10.97.240.99/applications/sample\nRepo:               https://github.com/ibm-cloud-architecture/cloudnative_sample_app_deploy\nTarget:             HEAD\nPath:               openshift\nSyncWindow:         Sync Allowed\nSync Policy:        <none>\nSync Status:        Synced to HEAD (9684037)\nHealth Status:      Healthy\n\nGROUP  KIND        NAMESPACE  NAME                             STATUS  HEALTH   HOOK  MESSAGE\n    Service     default    cloudnativesampleapp-service     Synced  Healthy        service/cloudnativesampleapp-service created\napps   Deployment  default    cloudnativesampleapp-deployment  Synced  Healthy        deployment.apps/cloudnativesampleapp-deployment created\n
  • Show application deployment history
    argocd app history sample\n
    ID  DATE                           REVISION\n0   2020-02-12 21:10:32 -0500 EST  HEAD (9684037)\n
"},{"location":"labs/devops/argocd/#references","title":"References","text":"
  • ArgoCD
"},{"location":"labs/devops/argocd/#kubernetes","title":"Kubernetes","text":""},{"location":"labs/devops/argocd/#pre-requisites_1","title":"Pre-requisites","text":"

Make sure your environment is setup properly for the lab.

Check the Environment Setup page for your setup.

"},{"location":"labs/devops/argocd/#argocd-installation_1","title":"ArgoCD Installation","text":"
  • Create the namespace argocd to install argocd
    kubectl create namespace argocd\nexport ARGOCD_NAMESPACE=argocd\n
  • Create RBAC resources

    kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/service_account.yaml\nkubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/role.yaml\nkubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/role_binding.yaml\nkubectl create -n argocd -f https://raw.githubusercontent.com/ibm-cloud-architecture/learning-cloudnative-101/master/static/yamls/argo-lab/argo-clusteradmin.yaml\n

  • Install CRDs

    kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/argo-cd/argoproj.io_applications_crd.yaml\nkubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/argo-cd/argoproj.io_appprojects_crd.yaml\nkubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/crds/argoproj.io_argocdexports_crd.yaml\nkubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/crds/argoproj.io_argocds_crd.yaml\n
    Verify CRDs
    kubectl get crd -n argocd\n
    NAME                        CREATED AT\napplications.argoproj.io    2020-05-15T02:05:55Z\nappprojects.argoproj.io     2020-05-15T02:05:56Z\nargocdexports.argoproj.io   2020-05-15T02:08:21Z\nargocds.argoproj.io         2020-05-15T02:08:21Z\n

  • Deploy Operator
    kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/deploy/operator.yaml\n
  • Deploy ArgoCD CO
    kubectl create -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-operator/v0.0.8/examples/argocd-lb.yaml\n
    Verify that ArgoCD Pods are running
    kubectl get pods -n argocd\n
    NAME                                                     READY   STATUS    RESTARTS   AGE\nargocd-operator-5f7d8cf7d8-486vn                         1/1     Running   0          3m46s\nexample-argocd-application-controller-7dc5fcb75d-xkk5h   1/1     Running   0          2m3s\nexample-argocd-dex-server-bb9df96cb-ndmhl                1/1     Running   0          2m3s\nexample-argocd-redis-756b6764-sb2gt                      1/1     Running   0          2m3s\nexample-argocd-repo-server-75944fcf87-zmh48              1/1     Running   0          2m3s\nexample-argocd-server-747b684c8c-xhgl9                   1/1     Running   0          2m3s\n
    Verify that the other ArgoCD resources are created
    kubectl get cm,secret,svc,deploy -n argocd\n
  • List the argocd-server service

    kubectl get svc example-argocd-server -n argocd\n

  • From the script, the Argo Server service has a type of LoadBalancer. If the ExternalIP is in a pending state, then there is no loadBalancer for your cluster, so we only need the ArgoCD server's NodePort. Otherwise, use the ExternalIP and NodePort to access Argo.

    NAME                    TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)                      AGE\nexample-argocd-server   LoadBalancer   10.105.73.245   <pending>   80:31138/TCP,443:31932/TCP   5m3s\n

  • To access the service we need the Node's External IP and the NodePort. Let's set an environment variable ARGOCD_URL with NODE_EXTERNAL_IP:NodePort.

    export NODE_EXTERNAL_IP=\"$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type==\"ExternalIP\")].address}')\"\nexport ARGOCD_NODEPORT=\"$(kubectl get svc example-argocd-server -n $ARGOCD_NAMESPACE -o jsonpath='{.spec.ports[0].nodePort}')\"\nexport ARGOCD_URL=\"https://$NODE_EXTERNAL_IP:$ARGOCD_NODEPORT\"\necho ARGOCD_URL=$ARGOCD_URL\n

  • If you can't access the NodePort from your computer and only http/80 then edit the argocd-server and add the flag --insecure

    kubectl edit -n argocd deployment example-argocd-server\n
    Use the kube api to proxy into the argocd server using kubectl port-forward
    kubectl port-forward service/example-argocd-server 8080:80 -n argocd\n
    Then you can access the argocd server locally on port 8080 http://localhost:8080

"},{"location":"labs/devops/argocd/#deploying-the-app_1","title":"Deploying the app","text":"
  • Login using the Browser into the UI using $ARGOCD_URL or localhost:8080 if using port-forward
  • Use admin as the username and get the password with the following command
    kubectl get secret example-argocd-cluster -n $ARGOCD_NAMESPACE -o jsonpath='{.data.admin\\.password}' | base64 -d\n
    For example the output is similar to this:
    tyafMb7BNvO0kP9eizx3CojrK8pYJFQq\n
  • Now go back to the ArgoCD home and click on NEW APP.
  • Add the below details:
  • Application Name: sample
  • Project - default
  • SYNC POLICY: Manual
  • REPO URL: https://github.com/ibm-cloud-architecture/cloudnative_sample_app_deploy
  • Revision: HEAD
  • Path: kubernetes
  • Cluster - Select the default one https://kubernetes.default.svc to deploy in-cluster
  • Namespace - default
  • Click Create to finish
  • You will now see the available apps.
  • Initially, the app will be out of sync. It is yet to be deployed. You need to sync it for deploying.

To sync the application, click SYNC and then SYNCHRONIZE.

  • Wait till the app is deployed.

  • Once the app is deployed, click on it to see the details.

"},{"location":"labs/devops/argocd/#verifying-the-deployment_1","title":"Verifying the deployment","text":"
  • Access the app to verify if it is correctly deployed.
  • List the cloudnativesampleapp-service service
    kubectl get svc cloudnativesampleapp-service\n
    It will have the NodePort for the application. In this case, it is 30499.
    NAME                           TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE\ncloudnativesampleapp-service   NodePort   172.21.118.165   <none>        9080:30499/TCP   20s\n
  • Set an environment variable APP_URL using the Node's IP and NodePort values
    export APP_NODE_PORT=\"$(kubectl get svc cloudnativesampleapp-service -n default -o jsonpath='{.spec.ports[0].nodePort}')\"\nexport APP_URL=\"$NODE_EXTERNAL_IP:$APP_NODE_PORT\"\necho Application=$APP_URL\n
  • Access the url using curl
    curl \"$APP_URL/greeting?name=Carlos\"\n
    {\"id\":2,\"content\":\"Welcome to Cloudnative bootcamp !!! Hello, Carlos :)\"}\n
"},{"location":"labs/devops/argocd/#references_1","title":"References","text":"
  • ArgoCD
"},{"location":"labs/devops/ibm-toolchain/","title":"IBM Toolchain Lab","text":"

By following this tutorial, you create an open toolchain that includes a Tekton-based delivery pipeline. You then use the toolchain and DevOps practices to develop a simple \"Hello World\" web application (app) that you deploy to the IBM Cloud Kubernetes Service.

Tekton is an open source, vendor-neutral, Kubernetes-native framework that you can use to build, test, and deploy apps to Kubernetes. Tekton provides a set of shared components for building continuous integration and continuous delivery (CICD) systems. As an open source project, Tekton is managed by the Continuous Delivery Foundation (CDF). The goal is to modernize continuous delivery by providing industry specifications for pipelines, workflows, and other building blocks. With Tekton, you can build, test, and deploy across cloud providers or on-premises systems by abstracting the underlying implementation details. Tekton pipelines are built into IBM Cloud\u2122 Continuous Delivery.

After you create the cluster and the toolchain, you change your app's code and push the change to the Git Repos and Issue Tracking repository (repo). When you push changes to your repo, the delivery pipeline automatically builds and deploys the code.

"},{"location":"labs/devops/ibm-toolchain/#prerequisites","title":"Prerequisites","text":"
  1. You must have an IBM Cloud account. If you don't have one, sign up for a trial. The account requires an IBMid. If you don't have an IBMid, you can create one when you register.
  2. Verify the toolchains and tool integrations that are available in your region and IBM Cloud environment. A toolchain is a set of tool integrations that support development, deployment, and operations tasks.

  3. You need a Kubernetes cluster and an API key. You can create them by using either the UI or the CLI. You can create from the IBM Cloud Catalog

  4. Create a container registry namespace to deploy the container we are going to build. You can create one from the Container Registry UI

  5. Create the API key by using the string that is provided for your key name.

    ibmcloud iam api-key-create my-api-key\n
    Save the API key value that is provided by the command.

"},{"location":"labs/devops/ibm-toolchain/#create-continues-delivery-service-instance","title":"Create Continuous Delivery Service Instance","text":"
  1. Open the IBM Cloud Catalog
  2. Search for delivery
  3. Click on Continuous Delivery
  4. Select Dallas Region, as the Tutorial will be using Managed Tekton Worker available in Dallas only.
  5. Select a Plan
  6. Click Create
"},{"location":"labs/devops/ibm-toolchain/#create-an-ibm-cloud-toolchain","title":"Create an IBM Cloud Toolchain","text":"

In this task, you create a toolchain and add the tools that you need for this tutorial. Before you begin, you need your API key and Kubernetes cluster name.

  1. Open the menu in the upper-left corner and click DevOps. Click ToolChains. Click Create a toolchain. Type in the search box toolchain. Click Build Your Own Toolchain.
  2. On the \"Build your own toolchain\" page, review the default information for the toolchain settings. The toolchain's name identifies it in IBM Cloud. Each toolchain is associated with a specific region and resource group. From the menus on the page, select the region Dallas since we are going to use the Beta Managed Tekton Worker, if you use Private Workers you can use any Region.
  3. Click Create. The blank toolchain is created.
  4. Click Add a Tool and click Git Repos and Issue Tracking.
    • From the Repository type list, select Clone.
    • In the Source repository URL field, type https://github.com/csantanapr/hello-tekton.git.
    • Make sure to uncheck the Make this repository private checkbox and that the Track deployment of code changes checkbox is selected.
    • Click Create Integration. Tiles for Git Issues and Git Code are added to your toolchain.
  5. Return to your toolchain's overview page.
  6. Click Add a Tool. Type pipeline in the search box and click Delivery Pipeline.
    • Type a name for your new pipeline.
    • Click Tekton.
    • Make sure that the Show apps in the View app menu checkbox is selected. All the apps that your pipeline creates are shown in the View App list on the toolchain's Overview page.
    • Click Create Integration to add the Delivery Pipeline to your toolchain.
  7. Click Delivery Pipeline to open the Tekton Delivery Pipeline dashboard. Click the Definitions tab and complete these tasks:
  8. Click Add to add your repository.
  9. Specify the Git repo and URL that contains the Tekton pipeline definition and related artifacts. From the list, select the Git repo that you created earlier.
  10. Select the branch in your Git repo that you want to use. For this tutorial, use the default value.
  11. Specify the directory path to your pipeline definition within the Git repo. You can reference a specific definition within the same repo. For this tutorial, use the default value.
  12. Click Add, then click Save
  13. Click the Worker tab and select the private worker that you want to use to run your Tekton pipeline on the associated cluster. Either select the private worker you set up in the previous steps, or select the IBM Managed workers in DALLAS option.
  14. Click Save
  15. Click the Triggers tab, click Add trigger, and click Git Repository. Associate the trigger with an event listener:
  16. From the Repository list, select your repo.
  17. Select the When a commit is pushed checkbox, and in the EventListener field, make sure that listener is selected.
  18. Click Save
  19. On the Triggers tab, click Add trigger and click Manual. Associate that trigger with an event listener:
  20. In the EventListener field, make sure that listener is selected.
  21. Click Save. Note: Manual triggers run when you click Run pipeline and select the trigger. Git repository triggers run when the specified Git event type occurs for the specified Git repo and branch. The list of available event listeners is populated with the listeners that are defined in the pipeline code repo.
  22. Click the Environment properties tab and define the environment properties for this tutorial. To add each property, click Add property and click Text property. Add these properties:
Parameter Required? Description apikey required Type the API key that you created earlier in this tutorial. cluster Optional (cluster) Type the name of the Kubernetes cluster that you created. registryNamespace required Type the IBM Image Registry namespace where the app image will be built and stored. To use an existing namespace, use the CLI and run ibmcloud cr namespace-list to identify all your current namespaces repository required Type the source Git repository where your resources are stored. This value is the URL of the Git repository that you created earlier in this tutorial. To find your repo URL, return to your toolchain and click the Git tile. When the repository is shown, copy the URL. revision Optional (master) The Git branch clusterRegion Optional (us-south) Type the region where your cluster is located. clusterNamespace Optional (prod) The namespace in your cluster where the app will be deployed. registryRegion Optional (us-south) The region where your Image registry is located. To find your registry region, use the CLI and run ibmcloud cr region.

12. Click Save

"},{"location":"labs/devops/ibm-toolchain/#explore-the-pipeline","title":"Explore the pipeline","text":"

With a Tekton-based delivery pipeline, you can automate the continuous building, testing, and deployment of your apps.

The Tekton Delivery Pipeline dashboard displays an empty table until at least one Tekton pipeline runs. After a Tekton pipeline runs, either manually or as the result of external Git events, the table lists the run, its status, and the last updated time of the run definition.

To run the manual trigger that you set up in the previous task, click Run pipeline and select the name of the manual trigger that you created. The pipeline starts to run and you can see the progress on the dashboard. Pipeline runs can be in any of the following states:

  • Pending: The PipelineRun definition is queued and waiting to run.
  • Running: The PipelineRun definition is running in the cluster.
  • Succeeded: The PipelineRun definition was successfully completed in the cluster.
  • Failed: The PipelineRun definition run failed. Review the log file for the run to determine the cause.

  • For more information about a selected run, click any row in the table. You view the Task definition and the steps in each PipelineRun definition. You can also view the status, logs, and details of each Task definition and step, and the overall status of the PipelineRun definition.

  • The pipeline definition is stored in the pipeline.yaml file in the .tekton folder of your Git repository. Each task has a separate section of this file. The steps for each task are defined in the tasks.yaml file.

  • Review the pipeline-build-task. The task consists of a git clone of the repository followed by two steps:

    • pre-build-check: This step checks for the mandatory Dockerfile and runs a lint tool. It then checks the registry current plan and quota before it creates the image registry namespace if needed.
    • build-docker-image: This step creates the Docker image by using the IBM Cloud Container Registry build service through the ibmcloud cr build CLI script.
  • Review the pipeline-validate-task. The task consists of a git clone of the repository, followed by the check-vulnerabilities step. This step runs the IBM Cloud Vulnerability Advisor on the image to check for known vulnerabilities. If it finds a vulnerability, the job fails, preventing the image from being deployed. This safety feature prevents apps with security holes from being deployed. The image has no vulnerabilities, so it passes. In this tutorial template, the default configuration of the job is to not block on failure.
  • Review the pipeline-deploy-task. The task consists of a git clone of the repository followed by two steps:
    • pre-deploy-check: This step checks whether the IBM Container Service cluster is ready and has a namespace that is configured with access to the private image registry by using an IBM Cloud API Key.
    • deploy-to-kubernetes: This step updates the deployment.yml manifest file with the image url and deploys the application using kubectl apply
  • After all the steps in the pipeline are completed, a green status is shown for each task. Click the deploy-to-kubernetes step and click the Logs tab to see the successful completion of this step.
  • Scroll to the end of the log. The DEPLOYMENT SUCCEEDED message is shown at the end of the log.
  • Click the URL to see the running application.
"},{"location":"labs/devops/ibm-toolchain/#modify-the-app-code","title":"Modify the App Code","text":"

In this task, you modify the application and redeploy it. You can see how your Tekton-based delivery pipeline automatically picks up the changes in the application on commit and redeploys the app.

  1. On the toolchain's Overview page, click the Git tile for your application.
    • Tip: You can also use the built-in Eclipse Orion-based Web IDE, a local IDE, or your favorite editor to change the files in your repo.
  2. In the repository directory tree, open the app.js file.
  3. Edit the text message code to change the welcome message.
  4. Commit the updated file by typing a commit message and clicking Commit changes to push the change to the project's remote repository.
  5. Return to the toolchain's Overview page by clicking the back arrow.
  6. Click Delivery Pipeline. The pipeline is running because the commit automatically started a build. Over the next few minutes, watch your change as it is built, tested, and deployed.
  7. After the deploy-to-kubernetes step is completed, refresh your application URL. The updated message is shown.
"},{"location":"labs/devops/ibm-toolchain/#clean-up-resources","title":"Clean up Resources","text":"

In this task, you can remove any of the content that is generated by this tutorial. Before you begin, you need the IBM Cloud CLI and the IBM Cloud Kubernetes Service CLI. Instructions to install the CLI are in the prerequisite section of this tutorial.

  1. Delete the Git repository: sign in to Git and select personal projects. Then go to the repository's General settings and remove the repository.
  2. Delete the toolchain. You can delete a toolchain and specify which of the associated tool integrations you want to delete. When you delete a toolchain, the deletion is permanent.
    • On the DevOps dashboard, on the Toolchains page, click the toolchain to delete. Alternatively, on the app's Overview page, on the Continuous delivery card, click View Toolchain.
    • Click the More Actions menu, which is next to View app.
    • Click Delete. Deleting a toolchain removes all of its tool integrations, which might delete resources that are managed by those integrations.
    • Confirm the deletion by typing the name of the toolchain and clicking Delete.
    • Tip: When you delete a GitHub, GitHub Enterprise, or Git Repos and Issue Tracking tool integration, the associated repo isn't deleted from GitHub, GitHub Enterprise, or Git Repos and Issue Tracking. You must manually remove the repo.
  3. Delete the cluster or discard the namespace from it. It is easiest to delete the entire namespace (Please do not delete the default namespace) by using the IBM Cloud\u2122 Kubernetes Service CLI from a command-line window. However, if you have other resources that you need to keep in the namespace, you need to delete the application resources individually instead of the entire namespace. To delete the entire namespace, enter this command:
    kubectl delete namespace [not-the-default-namespace]\n
  4. Delete your IBM Cloud API key.
  5. From the Manage menu, click Access (IAM). Click IBM Cloud API Keys.
  6. Find your API Key in the list and select Delete from the menu to the right of the API Key name.
  7. Delete the container images. To delete the images in your container image registry, enter this command in a command-line window:
    ibmcloud cr image-rm IMAGE [IMAGE...]\n
    If you created a registry namespace for the tutorial, delete the entire registry namespace by entering this command:
    ibmcloud cr namespace-rm NAMESPACE\n
    • Note: You can run this tutorial many times by using the same registry namespace and cluster parameters without discarding previously generated resources. The generated resources use randomized names to avoid conflicts.
"},{"location":"labs/devops/ibm-toolchain/#summary","title":"Summary","text":"

You created a toolchain with a Tekton-based delivery pipeline that deploys a \"Hello World\" app to a secure container in a Kubernetes cluster. You changed a message in the app and tested your change. When you pushed the change to the repo, the delivery pipeline automatically redeployed the app.

  • Read more about the IBM Cloud Kubernetes Service
  • Read more about Tekton
  • Explore the DevOps reference architecture.
"},{"location":"labs/devops/jenkins/","title":"Jenkins Lab","text":"OpenShiftKubernetes"},{"location":"labs/devops/jenkins/#introduction","title":"Introduction","text":"

In this lab, you will learn about how to define Continuous Integration for your application. We are using Jenkins to define it.

Jenkins

Jenkins is a popular open source Continuous Integration tool. It is built in Java. It allows the developers to perform continuous integration and build automation. It allows you to define steps and execute them based on the instructions like building the application using build tools like Ant, Gradle, Maven etc, executing shell scripts, running tests etc. All the steps can be executed based on the timing or event. It depends on the setup. It helps to monitor all these steps and sends notifications to the team members in case of failures. Also, it is very flexible and has a large plugin list which one can easily add to based on their requirements.

Check these guides out if you want to know more about Jenkins - Jenkins, Leading open source automation server.

"},{"location":"labs/devops/jenkins/#prerequisites","title":"Prerequisites","text":"
  • You need an IBM cloud account.
  • Create kubernetes cluster using IBM Cloud Kubernetes Service. Here, you can choose an openshift cluster.
  • Install oc command line tool.
  • You should be familiar with basics like Containers, Docker, Kubernetes.
"},{"location":"labs/devops/jenkins/#continuous-integration","title":"Continuous Integration","text":""},{"location":"labs/devops/jenkins/#install-jenkins","title":"Install Jenkins","text":"
  • Open the IBM Cloud Openshift cluster.
  • Click on the OpenShift web console tab and this will take you to openshift UI.
  • Create a new project.
  • Search for Jenkins.
  • Choose Jenkins (Ephemeral).
  • Install it.
  • Wait till the Jenkins installs and the pods are ready.
  • Once it is ready, you can access Jenkins by clicking the link.

Now, click on Log in with OpenShift.

  • When you get logged in, you will see the below screen. Click Allow selected permissions.

  • You will be able to access the Jenkins UI now.

"},{"location":"labs/devops/jenkins/#get-the-sample-app","title":"Get the Sample App","text":"
  • Fork the below repository.
https://github.com/ibm-cloud-architecture/cloudnative_sample_app\n
  • Clone the forked repository.
$ git clone https://github.com/(user)/cloudnative_sample_app.git\n
"},{"location":"labs/devops/jenkins/#jenkinsfile","title":"Jenkinsfile","text":"

Before setting up the CI pipeline, let us first have a look at our Jenkinsfile and understand the stages here.

Open your Jenkinsfile or you can also access it https://github.com/ibm-cloud-architecture/cloudnative_sample_app/blob/master/Jenkinsfile[here].

In our Jenkins file, we have five stages.

  • Local - Build

In this stage, we are building the application and packaging it using maven.

  • Local - Test

In this stage, we are making sure all the unit tests are running fine by running maven test.

  • Local - Run

In this stage, we are running the application using the previous build and verifying the application by performing health and API checks.

  • Build and Push Image

  • We are logging in to the IBM Cloud and accessing the IBM Cloud Container Registry.

  • We are also creating a namespace if not present.
  • We are building the image using ibmcloud cli tools.
  • Once the image is built, it is pushed into the container registry.

In this stage, we are building the docker image and pushing it to the registry.

  • Push to Deploy repo

  • Initially, we are cloning the deploy repository.

  • Changing the image tag to the one we previously built and pushed.
  • Pushing this new changes to the deploy repository.

In this stage, we are pushing the new artifact tag to the deploy repository which will later be used by the Continuous Delivery system.

"},{"location":"labs/devops/jenkins/#jenkins-credentials","title":"Jenkins Credentials","text":"

Let us now build all the credentials required by the pipeline.

  • In the Jenkins home page, click on Credentials.

  • In the Credentials page, click on Jenkins.

  • Now, click on Global Credentials (UnRestricted).

  • Click on Add Credentials to create the ones required for this lab.

image::Jenkins_add_creds.png[align=\"center\"]

  • Now create the secrets as follows.

Kind : Secret Text Secret: (Your container registry url, for eg., us.icr.io) ID: registry_url

Once created, you will see something like below.

Similarly create the rest of the credentials as well.

Kind : Secret Text Secret: (Your registry namespace, for eg., catalyst_cloudnative) ID: registry_namespace

Kind : Secret Text Secret: (Your IBM cloud region, for eg., us-east) ID: ibm_cloud_region

Kind : Secret Text Secret: (Your IBM Cloud API key) ID: ibm_cloud_api_key

Kind : Secret Text Secret: (Your Github Username) ID: git-account

Kind : Secret Text Secret: (Your Github Token) ID: github-token

Once all of them are created, you will have the list as follows.

"},{"location":"labs/devops/jenkins/#jenkins-pipeline","title":"Jenkins Pipeline","text":"
  • Create a new pipeline. Go to Jenkins > Click on New Item.
  • Enter the name of the application, choose Pipeline and click OK.
  • Now go to the Pipeline tab and enter the details of the repository.

  • In the Definition, choose Pipeline script from SCM.

  • Mention SCM as Git.
  • Enter the repository URL in Repository URL.
  • Specify master as the branch to build.
  • Save this information.

  • To initiate a build, click Build Now.

  • Once the build is successful, you will see something like below.

After this build is done, your deploy repository will be updated by the Jenkins.

"},{"location":"labs/devops/jenkins/#introduction_1","title":"Introduction","text":"

In this lab, you will learn about how to define Continuous Integration for your application. We are using https://jenkins.io/[Jenkins] to define it.

Jenkins

Jenkins is a popular open source Continuous Integration tool. It is built in Java. It allows the developers to perform continuous integration and build automation. It allows you to define steps and execute them based on the instructions like building the application using build tools like Ant, Gradle, Maven etc, executing shell scripts, running tests etc. All the steps can be executed based on the timing or event. It depends on the setup. It helps to monitor all these steps and sends notifications to the team members in case of failures. Also, it is very flexible and has a large plugin list which one can easily add to based on their requirements.

Check these guides out if you want to know more about Jenkins - https://jenkins.io/doc/[Jenkins, Leading open source automation server].

"},{"location":"labs/devops/jenkins/#prerequisites_1","title":"Prerequisites","text":"
  • You need an https://cloud.ibm.com/login[IBM cloud account].
  • Create kubernetes cluster using https://cloud.ibm.com/docs/containers?topic=containers-getting-started[IBM Cloud Kubernetes Service]. Here, you can choose a kubernetes cluster.
  • Install https://kubernetes.io/docs/tasks/tools/install-kubectl/[kubectl] command line tool.
  • You should be familiar with basics like Containers, Docker, Kubernetes.
"},{"location":"labs/devops/jenkins/#continuous-integration_1","title":"Continuous Integration","text":""},{"location":"labs/devops/jenkins/#install-jenkins_1","title":"Install Jenkins","text":"
  • Initially, log in to your IBM Cloud account as follows.
$ ibmcloud login -a cloud.ibm.com -r (region) -g (cluster_name)\n

And then download the Kube config files as below.

$ ibmcloud ks cluster-config --cluster (cluster_name)\n

You can also get the access instructions in IBM Cloud Dashboard -> Kubernetes Clusters -> Click on your Cluster -> Click on Access Tab.

  • Install Jenkins using helm using the below command. We are not using persistence in this lab.
$ helm install --name cloudnative-jenkins --set persistence.enabled=false stable/jenkins\n

If it is successfully executed, you will see something like below.

$ helm install --name cloudnative-jenkins --set persistence.enabled=false stable/jenkins\nNAME:   cloudnative\nLAST DEPLOYED: Wed Aug  7 16:22:55 2019\nNAMESPACE: default\nSTATUS: DEPLOYED\n\nRESOURCES:\n==> v1/ConfigMap\nNAME                       DATA  AGE\ncloudnative-jenkins        5     1s\ncloudnative-jenkins-tests  1     1s\n\n==> v1/Deployment\nNAME                 READY  UP-TO-DATE  AVAILABLE  AGE\ncloudnative-jenkins  0/1    1           0          1s\n\n==> v1/Pod(related)\nNAME                                  READY  STATUS    RESTARTS  AGE\ncloudnative-jenkins-57588c86c7-hxqmq  0/1    Init:0/1  0         0s\n\n==> v1/Role\nNAME                                 AGE\ncloudnative-jenkins-schedule-agents  1s\n\n==> v1/RoleBinding\nNAME                                 AGE\ncloudnative-jenkins-schedule-agents  1s\n\n==> v1/Secret\nNAME                 TYPE    DATA  AGE\ncloudnative-jenkins  Opaque  2     1s\n\n==> v1/Service\nNAME                       TYPE          CLUSTER-IP      EXTERNAL-IP     PORT(S)         AGE\ncloudnative-jenkins        LoadBalancer  172.21.143.35   169.63.132.124  8080:32172/TCP  1s\ncloudnative-jenkins-agent  ClusterIP     172.21.206.235  (none>          50000/TCP       1s\n\n==> v1/ServiceAccount\nNAME                 SECRETS  AGE\ncloudnative-jenkins  1        1s\n

Use the following steps to open Jenkins UI and login.

NOTES:\n1. Get your 'admin' user password by running:\nprintf $(kubectl get secret --namespace default cloudnative-jenkins -o jsonpath=\"{.data.jenkins-admin-password}\" | base64 --decode);echo\n2. Get the Jenkins URL to visit by running these commands in the same shell:\nNOTE: It may take a few minutes for the LoadBalancer IP to be available.\n        You can watch the status of by running 'kubectl get svc --namespace default -w cloudnative-jenkins'\nexport SERVICE_IP=$(kubectl get svc --namespace default cloudnative-jenkins --template \"{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}\")\necho http://$SERVICE_IP:8080/login\n\n3. Login with the password from step 1 and the username: admin\n\n\nFor more information on running Jenkins on Kubernetes, visit:\nhttps://cloud.google.com/solutions/jenkins-on-container-engine\n#################################################################################\n######   WARNING: Persistence is disabled!!! You will lose your data when   #####\n######            the Jenkins pod is terminated.                            #####\n#################################################################################\n

To get the url, run the below commands.

$ export SERVICE_IP=$(kubectl get svc --namespace default cloudnative-jenkins --template \"{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}\")\n$ echo http://$SERVICE_IP:8080/login\n

Once executed, you will see something like below.

$ echo http://$SERVICE_IP:8080/login\nhttp://169.63.132.124:8080/login\n
  • Now, let us log in to Jenkins.

The user name will be admin and to get the password, run the below command.

$ printf $(kubectl get secret --namespace default cloudnative-jenkins -o jsonpath=\"{.data.jenkins-admin-password}\" | base64 --decode);echo\n

It returns you the password as follows.

$ printf $(kubectl get secret --namespace default cloudnative-jenkins -o jsonpath=\"{.data.jenkins-admin-password}\" | base64 --decode);echo\npassword\n
  • Once successfully logged in, you will see the Jenkins home page, which is as follows.

"},{"location":"labs/devops/jenkins/#get-the-sample-app_1","title":"Get the Sample App","text":"
  • Fork the below repository.

    https://github.com/ibm-cloud-architecture/cloudnative_sample_app

  • Clone the forked repository.

$ git clone https://github.com/(user)/cloudnative_sample_app.git\n
"},{"location":"labs/devops/jenkins/#jenkinsfile_1","title":"Jenkinsfile","text":"

Before setting up the CI pipeline, let us first have a look at our Jenkinsfile and understand the stages here.

Open your Jenkinsfile or you can also access it https://github.com/ibm-cloud-architecture/cloudnative_sample_app/blob/master/Jenkinsfile[here].

In our Jenkins file, we have five stages.

  • Local - Build

In this stage, we are building the application and packaging it using maven.

  • Local - Test

In this stage, we are making sure all the unit tests are running fine by running maven test.

  • Local - Run

In this stage, we are running the application using the previous build and verifying the application by performing health and API checks.

  • Build and Push Image

  • We are logging in to the IBM Cloud and accessing the IBM Cloud Container Registry.

  • We are also creating a namespace if not present.
  • We are building the image using ibmcloud cli tools.
  • Once the image is built, it is pushed into the container registry.

In this stage, we are building the docker image and pushing it to the registry.

  • Push to Deploy repo

  • Initially, we are cloning the deploy repository.

  • Changing the image tag to the one we previously built and pushed.
  • Pushing this new changes to the deploy repository.

In this stage, we are pushing the new artifact tag to the deploy repository which will later be used by the Continuous Delivery system.

"},{"location":"labs/devops/jenkins/#jenkins-credentials_1","title":"Jenkins Credentials","text":"

Let us now build all the credentials required by the pipeline.

  • In the Jenkins home page, click on Credentials.

  • In the Credentials page, click on Jenkins.

  • Now, click on Global Credentials (UnRestricted).

  • Click on Add Credentials to create the ones required for this lab.

  • Now create the secrets as follows.

Kind : Secret Text Secret: Your container registry url, for eg., us.icr.io ID: registry_url

Once created, you will see something like below.

Similarly create the rest of the credentials as well.

Kind : Secret Text Secret: (Your registry namespace, for eg., catalyst_cloudnative) ID: registry_namespace

Kind : Secret Text Secret: (Your IBM cloud region, for eg., us-east) ID: ibm_cloud_region

Kind : Secret Text Secret: (Your IBM Cloud API key) ID: ibm_cloud_api_key

Kind : Secret Text Secret: (Your Github Username) ID: git-account

Kind : Secret Text Secret: (Your Github Token) ID: github-token

Once all of them are created, you will have the list as follows.

"},{"location":"labs/devops/jenkins/#jenkins-pipeline_1","title":"Jenkins Pipeline","text":"
  • Create a new pipeline. Go to Jenkins > Click on New Item.
  • Enter the name of your application, select Pipeline and then click OK.
  • In General, check This project is parameterized. Create a string parameter with name CLOUD and Default value kubernetes.
  • Now go to the Pipeline tab and enter the details of the repository.

  • In the Definition, choose Pipeline script from SCM.

  • Mention SCM as Git.
  • Enter the repository URL in Repository URL.
  • Specify master as the branch to build.
  • Save this information.

  • To initiate a build, click Build with Parameters.

  • Once the build is successful, you will see something like below.

After this build is done, your deploy repository will be updated by the Jenkins.

"},{"location":"labs/devops/tekton/","title":"Tekton","text":"OpenShiftKubernetes"},{"location":"labs/devops/tekton/#prerequisites","title":"Prerequisites","text":"

Make sure your environment is properly setup.

Follow the instructions here

"},{"location":"labs/devops/tekton/#setup","title":"SetUp","text":""},{"location":"labs/devops/tekton/#tekton-cli-installation","title":"Tekton CLI Installation","text":"
  • Tekton CLI is command line utility used to interact with the Tekton resources.

  • Follow the instructions on the tekton CLI github repository https://github.com/tektoncd/cli#installing-tkn

  • For MacOS for example you can use brew

    brew tap tektoncd/tools\nbrew install tektoncd/tools/tektoncd-cli\n

  • Verify the Tekton cli
    tkn version\n
  • The command should show a result like:
    $ tkn version\nClient version: 0.10.0\n
  • If you already have the tkn install you can upgrade running
    brew upgrade tektoncd/tools/tektoncd-cli\n
"},{"location":"labs/devops/tekton/#tekton-pipelines-installation","title":"Tekton Pipelines Installation","text":"
  • To deploy the Tekton pipelines: oc apply --filename https://raw.githubusercontent.com/ibm-cloud-architecture/learning-cloudnative-101/master/static/yamls/tekton-lab/tekton-operator.yaml
  • Note: It will take a few minutes for the Tekton pipeline components to be installed; you can watch the status using the command:
    oc get pods -n openshift-operators -w\n
    You can use Ctrl+c to terminate the watch
  • A successful deployment of Tekton pipelines will show the following pods:
    NAME                                         READY   STATUS    RESTARTS   AGE\nopenshift-pipelines-operator-9cdbbb854-x9tvs   1/1     Running   0          25s\n
"},{"location":"labs/devops/tekton/#create-target-namespace","title":"Create Target Namespace","text":"
  • Set the environment variable NAMESPACE to tekton-demo, if you open a new terminal remember to set this environment again
    export NAMESPACE=tekton-demo\n
  • Create the namespace using the variable NAMESPACE
    oc new-project $NAMESPACE\n
"},{"location":"labs/devops/tekton/#tasks","title":"Tasks","text":""},{"location":"labs/devops/tekton/#task-creation","title":"Task Creation","text":"
  • Create the below yaml files.
  • The following snippet shows what a Tekton Task YAML looks like:
  • Create the file task-test.yaml

    apiVersion: tekton.dev/v1beta1\nkind: Task\nmetadata:\nname: java-test\nspec:\nparams:\n    - name: url\n    default: https://github.com/ibm-cloud-architecture/cloudnative_sample_app\n    - name: revision\n    default: master\nsteps:\n    - name: git-clone\n    image: alpine/git\n    script: |\n        git clone -b $(params.revision) --depth 1 $(params.url) /source\n    volumeMounts:\n        - name: source\n        mountPath: /source\n    - name: test\n    image: maven:3.3-jdk-8\n    workingdir: /source\n    script: |\n        mvn test\n        echo \"tests passed with rc=$?\"\n    volumeMounts:\n        - name: m2-repository\n        mountPath: /root/.m2\n        - name: source\n        mountPath: /source\nvolumes:\n    - name: m2-repository\n    emptyDir: {}\n    - name: source\n    emptyDir: {}\n

  • Each Task has the following:

  • name - the unique name using which the task can be referred
    • name - the name of the parameter
    • description - the description of the parameter
    • default - the default value of parameter
  • Note: The TaskRun or PipelineRun could override the parameter values, if no parameter value is passed then the default value will be used.

  • steps - One or more sub-tasks that will be executed in the defined order. The step has all the attributes like a Pod spec

  • volumes - the task can also mount external volumes using the volumes attribute.
  • The parameters that were part of the spec inputs params can be used in the steps using the notation $(<variable-name>).
"},{"location":"labs/devops/tekton/#task-deploy","title":"Task Deploy","text":"
  • The application test task could be created using the command:

    oc apply -f task-test.yaml -n $NAMESPACE\n

  • We will use the Tekton cli to inspect the created resources

    tkn task ls -n $NAMESPACE\n

  • The above command should list one Task as shown below:

    NAME        AGE\njava-test   22 seconds ago\n

"},{"location":"labs/devops/tekton/#taskrun","title":"TaskRun","text":"
  • The TaskRun is used to run a specific task independently. In the following section we will run the java-test task created in the previous step
"},{"location":"labs/devops/tekton/#taskrun-creation","title":"TaskRun Creation","text":"
  • The following snippet shows what a Tekton TaskRun YAML looks like:
  • Create the file taskrun-test.yaml
    apiVersion: tekton.dev/v1beta1\nkind: TaskRun\nmetadata:\ngenerateName: test-task-run-\nspec:\ntaskRef:\n    name: java-test\nparams:\n    - name: url\n    value: https://github.com/ibm-cloud-architecture/cloudnative_sample_app\n
  • generateName - since the TaskRun can be run many times, in order to have a unique name across the TaskRuns (helpful when checking the TaskRun history) we use generateName instead of name. When Kubernetes sees generateName it will generate a unique set of characters and suffix them to test-task-run-, similar to how pod names are generated
  • taskRef - this is used to refer to the Task by its name that will be run as part of this TaskRun. In this example we use the java-test Task.
  • As described in the earlier section that the Task inputs and outputs could be overridden via TaskRun.
  • params - these are the parameter values that are passed to the task
  • The application test task(java-maven-test) could be run using the command:
    kubectl create -f taskrun-test.yaml -n $NAMESPACE \n
  • Note - As tasks will use generated name, never use oc apply -f taskrun-test.yaml
  • We will use the Tekton cli to inspect the created resources:

    tkn tr ls -n $NAMESPACE\n
    The above command should list one TaskRun as shown below:
    NAME                       STARTED        DURATION   STATUS\ntest-task-run-q6s8c        1 minute ago   ---        Running(Pending)\n
    Note - It will take a few seconds for the TaskRun to show status as Running as it needs to download the container images.

  • To check the logs of the Task Run using the tkn:

    tkn tr logs -f --last -n $NAMESPACE\n
    Note - Each task step will be run within a container of its own. The -f or -a allows to tail the logs from all the containers of the task. For more options run tkn tr logs --help

  • If you see the TaskRun status as Failed or Error use the following command to check the reason for error:
    tkn tr describe --last -n $NAMESPACE\n
  • If it is successful, you will see something like below.
    tkn tr ls -n $NAMESPACE\n
    The above command should list one TaskRun as shown below:
    NAME                  STARTED          DURATION     STATUS\ntest-task-run   47 seconds ago   34 seconds   Succeeded\n
"},{"location":"labs/devops/tekton/#creating-additional-tasks-and-deploying-them","title":"Creating additional tasks and deploying them","text":"
  • Create a Task to build a container image and push to the registry
  • This task will be later used by the pipeline.
  • Download the task file task-buildah.yaml to build the image, push the image to the registry:
  • Create the buildah Task using the file and the command:
    oc apply -f task-buildah.yaml -n $NAMESPACE\n
  • Use the Tekton cli to inspect the created resources
    tkn task ls -n $NAMESPACE\n
  • The above command should list one Task as shown below:

    NAME              AGE\nbuildah            4 seconds ago\njava-test         46 minutes ago\n

  • Create an environment variable for the location to push the image to be built. Replace NAMESPACE with the dockerhub username, or IBM CR Namespace

    export REGISTRY_SERVER=image-registry.openshift-image-registry.svc:5000\nexport IMAGE_URL=${REGISTRY_SERVER}/${NAMESPACE}/cloudnative_sample_app\necho IMAGE_URL=${IMAGE_URL}\n

  • Lets create a Task Run for buildah Task using the tkn CLI passing the inputs, outputs and service account.

    tkn task start buildah --showlog \\\n-p image=${IMAGE_URL} \\\n-p url=https://github.com/ibm-cloud-architecture/cloudnative_sample_app \\\n-s pipeline \\\n-n $NAMESPACE\n
    The task will start and logs will start printing automatically
    Taskrun started: buildah-run-vvrg2\nWaiting for logs to be available...\n

  • Verify the status of the Task Run

    tkn tr ls -n $NAMESPACE\n
    Output should look like this
    NAME                  STARTED          DURATION     STATUS\nbuildah-run-zbsrv      2 minutes ago    1 minute     Succeeded\n

  • To clean up all Pods associated with all Task Runs, delete all the task runs resources
    oc delete taskrun --all -n $NAMESPACE\n
  • (Optional) Instead of starting the Task via tkn task start you could also use yaml TaskRun, create a file taskrun.yaml
    apiVersion: tekton.dev/v1beta1\nkind: TaskRun\nmetadata:\ngenerateName: buildah-task-run-\nspec:\nserviceAccountName: pipeline\ntaskRef:\n    name: buildah\nparams:\n    - name: url\n    value: https://github.com/ibm-cloud-architecture/cloudnative_sample_app\n    - name: image\n    value: image-registry.openshift-image-registry.svc:5000/tekton-demo/cloudnative_sample_app\n
    Then create the TaskRun with
    oc create -f taskrun-buildah.yaml -n $NAMESPACE\n
    Follow the logs with:
    tkn tr logs -f -n $NAMESPACE\n
"},{"location":"labs/devops/tekton/#pipelines","title":"Pipelines","text":""},{"location":"labs/devops/tekton/#pipeline-creation","title":"Pipeline Creation","text":"
  • Pipelines allows to start multiple Tasks, in parallel or in a certain order

  • Create the file pipeline.yaml, the Pipeline contains two Tasks

    apiVersion: tekton.dev/v1beta1\nkind: Pipeline\nmetadata:\nname: test-build\nspec:\nparams:\n    - name: repo-url\n    default: https://github.com/ibm-cloud-architecture/cloudnative_sample_app\n    - name: revision\n    default: master\n    - name: image-server\n    default: image-registry.openshift-image-registry.svc:5000\n    - name: image-namespace\n    default: tekton-demo\n    - name: image-repository\n    default: cloudnative_sample_app\ntasks:\n    - name: test\n    taskRef:\n        name: java-test\n    params:\n        - name: url\n        value: $(params.repo-url)\n        - name: revision\n        value: $(params.revision)\n    - name: build\n    runAfter: [test]\n    taskRef:\n        name: buildah\n    params:\n        - name: image\n        value: $(params.image-server)/$(params.image-namespace)/$(params.image-repository)\n        - name: url\n        value: $(params.repo-url)\n        - name: revision\n        value: $(params.revision)\n

  • Pipeline defines a list of Tasks to execute in order, while also indicating if any outputs should be used as inputs of a following Task by using the from field and also indicating the order of executing (using the runAfter and from fields). The same variable substitution you used in Tasks is also available in a Pipeline.

  • Create the Pipeline using the command:
    oc apply -f pipeline.yaml -n $NAMESPACE\n
  • Use the Tekton cli to inspect the created resources
    tkn pipeline ls -n $NAMESPACE\n
    The above command should list one Pipeline as shown below:
    NAME              AGE              LAST RUN   STARTED   DURATION   STATUS\ntest-build-push   31 seconds ago   ---        ---       ---        ---\n
"},{"location":"labs/devops/tekton/#pipelinerun","title":"PipelineRun","text":""},{"location":"labs/devops/tekton/#pipelinerun-creation","title":"PipelineRun Creation","text":"
  • To execute the Tasks in the Pipeline, you must create a PipelineRun. Creation of a PipelineRun will trigger the creation of TaskRuns for each Task in your pipeline.
  • Create the file pipelinerun.yaml
    apiVersion: tekton.dev/v1alpha1\nkind: PipelineRun\nmetadata:\ngenerateName: test-build-run-\nspec:\nserviceAccountName: pipeline\npipelineRef:\n    name: test-build\nparams:\n    - name: image-server\n    value: image-registry.openshift-image-registry.svc:5000\n    - name: image-namespace\n    value: tekton-demo\n
    serviceAccount - it is always recommended to have a service account associated with PipelineRun, which can then be used to define fine grained roles.
  • Create the PipelineRun using the command:
    oc create -f pipelinerun.yaml -n $NAMESPACE\n
  • We will use the Tekton cli to inspect the created resources

    tkn pipelinerun ls -n $NAMESPACE\n

  • The above command should list one PipelineRun as shown below:

    NAME                        STARTED         DURATION   STATUS\ntest-build-push-run-c7zgv   8 seconds ago   ---        Running\n

  • Wait a few minutes for your pipeline to complete all the tasks. If it is successful, you will see something like below.

    tkn pipeline ls -n $NAMESPACE\n
    NAME              AGE              LAST RUN                    STARTED         DURATION    STATUS\ntest-build-push   33 minutes ago   test-build-push-run-c7zgv   2 minutes ago   2 minutes   Succeeded\n

  • Run again the pipeline ls command

    tkn pipelinerun ls -n $NAMESPACE\n
    NAME                        STARTED         DURATION    STATUS\ntest-build-push-run-c7zgv   2 minutes ago   2 minutes   Succeeded\n
    If it is successful, go to your container registry account and verify if you have the cloudnative_sample_app image pushed.

  • (Optional) Run the pipeline again using the tkn CLI

    tkn pipeline start test-build --showlog \\\n-s pipeline \\\n-n $NAMESPACE\n

  • (Optional) Re-run the pipeline using last pipelinerun values
    tkn pipeline start test-build-push --last -n $NAMESPACE\n
"},{"location":"labs/devops/tekton/#deploy-application","title":"Deploy Application","text":"
  • Create a deployment
    oc create deployment cloudnative --image=${IMAGE_URL} -n $NAMESPACE\n
  • Verify if the pods are running:
    oc get pods -l app=cloudnative -n $NAMESPACE\n
  • Expose the deployment as a service
    oc expose deployment cloudnative --port=9080 -n $NAMESPACE\n
  • Expose the service as a route
    oc expose service cloudnative -n $NAMESPACE\n
  • Now access the compose the URL of the App using IP and NodePort
    export APP_URL=\"$(oc get route cloudnative --template 'http://{{.spec.host}}')/greeting?name=Carlos\"\necho APP_URL=$APP_URL\n
    http://cloudnative-tekton-demo.apps-crc.testing/greeting?name=Carlos\n
  • Now access the app from terminal or browser
    curl $APP_URL\n
    Output should be
    {\"id\":4,\"content\":\"Welcome to Cloudnative bootcamp !!! Hello, Carlos :)\"}\n
    open $APP_URL\n
"},{"location":"labs/devops/tekton/#prerequisites_1","title":"Prerequisites","text":"

Make sure your environment is properly setup.

Follow the instructions here

"},{"location":"labs/devops/tekton/#setup_1","title":"SetUp","text":""},{"location":"labs/devops/tekton/#tekton-cli-installation_1","title":"Tekton CLI Installation","text":"
  • Tekton CLI is command line utility used to interact with the Tekton resources.

  • Follow the instructions on the tekton CLI github repository https://github.com/tektoncd/cli#installing-tkn

  • For MacOS for example you can use brew

    brew install tektoncd-cli\n

  • Verify the Tekton cli
    tkn version\n
  • The command should show a result like:
    $ tkn version\nClient version: 0.10.0\n
  • If you already have the tkn install you can upgrade running
    brew upgrade tektoncd/tools/tektoncd-cli\n
"},{"location":"labs/devops/tekton/#tekton-pipelines-installation_1","title":"Tekton Pipelines Installation","text":"
  • To deploy the Tekton pipelines:
    kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/previous/v0.13.2/release.yaml\n
  • Note: It will take a few minutes for the Tekton pipeline components to be installed; you can watch the status using the command:
    kubectl get pods -n tekton-pipelines -w\n
    You can use Ctrl+c to terminate the watch
  • A successful deployment of Tekton pipelines will show the following pods:
    NAME                                         READY   STATUS    RESTARTS   AGE\ntekton-pipelines-controller-9b8cccff-j6hvr   1/1     Running   0          2m33s\ntekton-pipelines-webhook-6fc9d4d9b6-kpkp7    1/1     Running   0          2m33s\n
"},{"location":"labs/devops/tekton/#tekton-dashboard-installation-optional","title":"Tekton Dashboard Installation (Optional)","text":"
  • To deploy the Tekton dashboard:
    kubectl apply --filename https://github.com/tektoncd/dashboard/releases/download/v0.7.0/tekton-dashboard-release.yaml\n
  • Note: It will take a few minutes for the Tekton dashboard components to be installed; you can watch the status using the command:
    kubectl get pods -n tekton-pipelines -w\n
    You can use Ctrl+c to terminate the watch
  • A successful deployment of Tekton pipelines will show the following pods:
    NAME                                           READY   STATUS    RESTARTS   AGE\ntekton-dashboard-59c7fbf49f-79f7q              1/1     Running   0          50s\ntekton-pipelines-controller-6b7f7cf7d8-r65ps   1/1     Running   0          15m\ntekton-pipelines-webhook-7bbd8fcc45-sfgxs      1/1     Running   0          15m\n
  • Access the dashboard as follows:
    kubectl --namespace tekton-pipelines port-forward svc/tekton-dashboard 9097:9097\n
    You can access the web UI at http://localhost:9097 .
"},{"location":"labs/devops/tekton/#create-target-namespace_1","title":"Create Target Namespace","text":"
  • Set the environment variable NAMESPACE to tekton-demo, if you open a new terminal remember to set this environment again
    export NAMESPACE=tekton-demo\n
  • Create the namespace using the variable NAMESPACE
    kubectl create namespace $NAMESPACE\n
"},{"location":"labs/devops/tekton/#tasks_1","title":"Tasks","text":""},{"location":"labs/devops/tekton/#task-creation_1","title":"Task Creation","text":"
  • Create the below yaml files.
  • The following snippet shows what a Tekton Task YAML looks like:
  • Create the file task-test.yaml

    apiVersion: tekton.dev/v1beta1\nkind: Task\nmetadata:\nname: java-test\nspec:\nparams:\n    - name: url\n    - name: revision\n    default: master\nsteps:\n    - name: git-clone\n    image: alpine/git\n    script: |\n        git clone -b $(params.revision) --depth 1 $(params.url) /source\n    volumeMounts:\n        - name: source\n        mountPath: /source\n    - name: test\n    image: maven:3.3-jdk-8\n    workingdir: /source\n    script: |\n        mvn test\n        echo \"tests passed with rc=$?\"\n    volumeMounts:\n        - name: m2-repository\n        mountPath: /root/.m2\n        - name: source\n        mountPath: /source\nvolumes:\n    - name: m2-repository\n    emptyDir: {}\n    - name: source\n    emptyDir: {}\n

  • Each Task has the following:

  • name - the unique name using which the task can be referred
    • name - the name of the parameter
    • description - the description of the parameter
    • default - the default value of parameter
  • Note: The TaskRun or PipelineRun could override the parameter values, if no parameter value is passed then the default value will be used.

  • steps - One or more sub-tasks that will be executed in the defined order. The step has all the attributes like a Pod spec

  • volumes - the task can also mount external volumes using the volumes attribute.
  • The parameters that were part of the spec inputs params can be used in the steps using the notation $(<variable-name>).
"},{"location":"labs/devops/tekton/#task-deploy_1","title":"Task Deploy","text":"
  • The application test task could be created using the command:

    kubectl apply -f task-test.yaml -n $NAMESPACE\n

  • We will use the Tekton cli to inspect the created resources

    tkn task ls -n $NAMESPACE\n

  • The above command should list one Task as shown below:

    NAME        AGE\njava-test   22 seconds ago\n

"},{"location":"labs/devops/tekton/#taskrun_1","title":"TaskRun","text":"
  • The TaskRun is used to run a specific task independently. In the following section we will run the java-test task created in the previous step
"},{"location":"labs/devops/tekton/#taskrun-creation_1","title":"TaskRun Creation","text":"
  • The following snippet shows what a Tekton TaskRun YAML looks like:
  • Create the file taskrun-test.yaml
    apiVersion: tekton.dev/v1beta1\nkind: TaskRun\nmetadata:\ngenerateName: test-task-run-\nspec:\ntaskRef:\n    name: java-test\nparams:\n    - name: url\n    value: https://github.com/ibm-cloud-architecture/cloudnative_sample_app\n
  • generateName - since the TaskRun can be run many times, in order to have a unique name across the TaskRuns (helpful when checking the TaskRun history) we use generateName instead of name. When Kubernetes sees generateName it will generate a unique set of characters and suffix them to test-task-run-, similar to how pod names are generated
  • taskRef - this is used to refer to the Task by its name that will be run as part of this TaskRun. In this example we use the java-test Task.
  • As described in the earlier section that the Task inputs and outputs could be overridden via TaskRun.
  • params - these are the parameter values that are passed to the task
  • The application test task(java-maven-test) could be run using the command:
    kubectl create -n $NAMESPACE -f taskrun-test.yaml\n
  • Note - As tasks will use generated name, never use kubectl apply -f taskrun-test.yaml
  • We will use the Tekton cli to inspect the created resources:

    tkn tr ls -n $NAMESPACE\n
    The above command should list one TaskRun as shown below:
    NAME                       STARTED        DURATION   STATUS\ntest-task-run-q6s8c        1 minute ago   ---        Running(Pending)\n
    Note - It will take a few seconds for the TaskRun to show status as Running as it needs to download the container images.

  • To check the logs of the Task Run using the tkn:

    tkn tr logs -f -a -n $NAMESPACE\n
    Note - Each task step will be run within a container of its own. The -f or -a allows to tail the logs from all the containers of the task. For more options run tkn tr logs --help

  • If you see the TaskRun status as Failed or Error use the following command to check the reason for error:
    tkn tr describe --last -n $NAMESPACE\n
  • If it is successful, you will see something like below.
    tkn tr ls -n $NAMESPACE\n
    The above command should list one TaskRun as shown below:
    NAME                  STARTED          DURATION     STATUS\ntest-task-run-q6s8c   47 seconds ago   34 seconds   Succeeded\n
"},{"location":"labs/devops/tekton/#creating-additional-tasks-and-deploying-them_1","title":"Creating additional tasks and deploying them","text":"
  • Create a Task to build a container image and push to the registry
  • This task will be later used by the pipeline.
  • Download the task file task-buildah.yaml to build the image, push the image to the registry:
  • Create task buildah
  • Create the buildah Task using the file and the command:
    kubectl apply -f task-buildah.yaml -n $NAMESPACE\n
  • Use the Tekton cli to inspect the created resources
    tkn task ls -n $NAMESPACE\n
  • The above command should list one Task as shown below:

    NAME              AGE\nbuildah            4 seconds ago\njava-test         46 minutes ago\n

  • To access the container registry, create the required secret as follows.

  • If using IBM Container registry use iamapikey for REGISTRY_USERNAME and get a API Key for REGISTRY_PASSWORD, use the domain name for the region IBM CR service like us.icr.io
  • Create the environment variables to be used, replace with real values and include the single quotes:

    export REGISTRY_USERNAME='<REGISTRY_USERNAME>'\n
    export REGISTRY_PASSWORD='<REGISTRY_PASSWORD>'\n
    export REGISTRY_SERVER='docker.io'\n

  • Run the following command to create a secret regcred in the namespace NAMESPACE

    kubectl create secret docker-registry regcred \\\n--docker-server=${REGISTRY_SERVER} \\\n--docker-username=${REGISTRY_USERNAME} \\\n--docker-password=${REGISTRY_PASSWORD} \\\n-n ${NAMESPACE}\n

    Before creating, replace the values as mentioned above. Note: If your docker password contains special characters in it, please enclose the password in double quotes or place an escape character before each special character.

    • (Optional) Only if you have problems with the credentials you can recreate it, but you have to delete it first
      kubectl delete secret regcred -n $NAMESPACE\n
  • Before we run the Task using TaskRun let us create the Kubernetes service account and attach the needed permissions to the service account, the following Kubernetes resource defines a service account called pipeline in namespace $NAMESPACE who will have administrative role within the $NAMESPACE namespace.

  • Create the file sa.yaml
    apiVersion: v1\nkind: ServiceAccount\nmetadata:\nname: pipeline\nsecrets:\n- name: regcred\n
  • Create sa role as follows:

    kubectl create -n $NAMESPACE -f sa.yaml\n

  • Create an environment variable for the location to push the image to be built. Replace NAMESPACE with the dockerhub username, or IBM CR Namespace

    export NAMESPACE='<REGISTRY_NAMESPACE>'\nexport IMAGE_URL=${REGISTRY_SERVER}/${REGISTRY_NAMESPACE}/cloudnative_sample_app\n

  • Lets create a Task Run for buildah Task using the tkn CLI passing the inputs, outputs and service account.

    tkn task start buildah --showlog \\\n-p url=https://github.com/ibm-cloud-architecture/cloudnative_sample_app \\\n-p image=${IMAGE_URL} \\\n-s pipeline \\\n-n $NAMESPACE\n

    The task will start and logs will start printing automatically

    Taskrun started: buildah-run-vvrg2\nWaiting for logs to be available...\n

  • Verify the status of the Task Run

    tkn tr ls -n $NAMESPACE\n
    Output should look like this
    NAME                  STARTED          DURATION     STATUS\nbuildah-run-zbsrv      2 minutes ago    1 minute     Succeeded\n

  • To clean up all Pods associated with all Task Runs, delete all the task runs resources
    kubectl delete taskrun --all -n $NAMESPACE\n
  • (Optional) Instead of starting the Task via tkn task start you could also use yaml TaskRun, create a file taskrun-buildah.yaml Make sure update value for parameter image with your registry info.
    apiVersion: tekton.dev/v1beta1\nkind: TaskRun\nmetadata:\ngenerateName: buildah-task-run-\nspec:\nserviceAccountName: pipeline\ntaskRef:\n    name: buildah\nparams:\n    - name: url\n    value: https://github.com/ibm-cloud-architecture/cloudnative_sample_app\n    - name: image\n    value: docker.io/csantanapr/cloudnative_sample_app\n
    Then create the TaskRun with generateName
    kubectl create -f taskrun-buildah.yaml -n $NAMESPACE\n
    Follow the logs with:
    tkn tr logs --last -f -n $NAMESPACE\n
"},{"location":"labs/devops/tekton/#pipelines_1","title":"Pipelines","text":""},{"location":"labs/devops/tekton/#pipeline-creation_1","title":"Pipeline Creation","text":"
  • Pipelines allows to start multiple Tasks, in parallel or in a certain order

  • Create the file pipeline.yaml, the Pipeline contains two Tasks

    apiVersion: tekton.dev/v1beta1\nkind: Pipeline\nmetadata:\nname: test-build\nspec:\nparams:\n    - name: repo-url\n    default: https://github.com/ibm-cloud-architecture/cloudnative_sample_app\n    - name: revision\n    default: master\n    - name: image-server\n    - name: image-namespace\n    - name: image-repository\n    default: cloudnative_sample_app\ntasks:\n    - name: test\n    taskRef:\n        name: java-test\n    params:\n        - name: url\n        value: $(params.repo-url)\n        - name: revision\n        value: $(params.revision)\n    - name: build\n    runAfter: [test]\n    taskRef:\n        name: buildah\n    params:\n        - name: image\n        value: $(params.image-server)/$(params.image-namespace)/$(params.image-repository)\n        - name: url\n        value: $(params.repo-url)\n        - name: revision\n        value: $(params.revision)\n

  • Pipeline defines a list of Tasks to execute in order, while also indicating if any outputs should be used as inputs of a following Task by using the from field and also indicating the order of executing (using the runAfter and from fields). The same variable substitution you used in Tasks is also available in a Pipeline.

  • Create the Pipeline using the command:
    kubectl apply -f pipeline.yaml -n $NAMESPACE\n
  • Use the Tekton cli to inspect the created resources
    tkn pipeline ls -n $NAMESPACE\n
    The above command should list one Pipeline as shown below:
    NAME              AGE              LAST RUN   STARTED   DURATION   STATUS\ntest-build-push   31 seconds ago   ---        ---       ---        ---\n
"},{"location":"labs/devops/tekton/#pipelinerun_1","title":"PipelineRun","text":""},{"location":"labs/devops/tekton/#pipelinerun-creation_1","title":"PipelineRun Creation","text":"
  • To execute the Tasks in the Pipeline, you must create a PipelineRun. Creation of a PipelineRun will trigger the creation of TaskRuns for each Task in your pipeline.
  • Create the file pipelinerun.yaml replace the values for image-server and image-namespace with your own.
    apiVersion: tekton.dev/v1beta1\nkind: PipelineRun\nmetadata:\ngenerateName: test-build-run-\nspec:\nserviceAccountName: pipeline\npipelineRef:\n    name: test-build\nparams:\n    - name: image-server\n    value: us.icr.io\n    - name: image-namespace\n    value: student01-registry\n
    serviceAccount - it is always recommended to have a service account associated with PipelineRun, which can then be used to define fine grained roles. Replace the values for image-server and image-namespace
  • Create the PipelineRun using the command:
    kubectl create -f pipelinerun.yaml -n $NAMESPACE\n
  • We will use the Tekton cli to inspect the created resources

    tkn pipelinerun ls -n $NAMESPACE\n

  • The above command should list one PipelineRun as shown below:

    NAME                        STARTED         DURATION   STATUS\ntest-build-push-run-c7zgv   8 seconds ago   ---        Running\n

  • Get the logs of the pipeline using the following command

    tkn pipelinerun logs --last -f\n

  • Wait a few minutes for your pipeline to complete all the tasks. If it is successful, you will see something like below.
    tkn pipeline ls -n $NAMESPACE\n
    NAME              AGE              LAST RUN                    STARTED         DURATION    STATUS\ntest-build-push   33 minutes ago   test-build-push-run-c7zgv   2 minutes ago   2 minutes   Succeeded\n
  • Run again the pipeline ls command

    tkn pipelinerun ls -n $NAMESPACE\n
    NAME                        STARTED         DURATION    STATUS\ntest-build-push-run-c7zgv   2 minutes ago   2 minutes   Succeeded\n
    If it is successful, go to your container registry account and verify if you have the cloudnative_sample_app image pushed.

  • (Optional) Run the pipeline again using the tkn CLI

    tkn pipeline start test-build --last -n $NAMESPACE\n

  • (Optional) Re-run the pipeline using last pipelinerun values
    tkn pipeline start test-build-push --last -f -n $NAMESPACE\n
"},{"location":"labs/devops/tekton/#deploy-application_1","title":"Deploy Application","text":"
  • Add the imagePullSecret to the default Service Account
    kubectl patch sa default -p '\"imagePullSecrets\": [{\"name\": \"regcred\" }]' -n $NAMESPACE\n
  • Create a deployment
    kubectl create deployment cloudnative --image=${IMAGE_URL} -n $NAMESPACE\n
  • Verify if the pods are running:
    kubectl get pods -l app=cloudnative -n $NAMESPACE\n
  • Expose the deployment
    kubectl expose deployment cloudnative --type=NodePort --port=9080 -n $NAMESPACE\n
  • Now access the compose the URL of the App using IP and NodePort
    export APP_EXTERNAL_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type==\"ExternalIP\")].address}')\nexport APP_NODEPORT=$(kubectl get svc cloudnative -n $NAMESPACE -o jsonpath='{.spec.ports[0].nodePort}')\nexport APP_URL=\"http://${APP_EXTERNAL_IP}:${APP_NODEPORT}/greeting?name=Carlos\"\necho APP_URL=$APP_URL\n
    http://192.168.64.30:30632//greeting?name=Carlos\n
  • Now access the app from terminal or browser
    curl $APP_URL\n
    Output should be
    {\"id\":4,\"content\":\"Welcome to Cloudnative bootcamp !!! Hello, Carlos :)\"}\n
    open $APP_URL\n
"},{"location":"labs/kubernetes/lab-solutions/","title":"Lab Solutions","text":"
  • Lab 1

  • Lab 2

  • Lab 3

  • Lab 4

  • Lab 5

  • Lab 6

  • Lab 7

  • Lab 8

  • Lab 9

  • Lab 10

"},{"location":"labs/kubernetes/ingress-iks/","title":"Kubernetes Lab Ingress Controller IBM Free Kubernetes cluster","text":"

The IBM Kubernetes service free clusters consist of a single worker node with 2 CPU and 4 GB of memory for experimenting with Kubernetes. Unlike the fee-based service, these clusters do not include capabilities for application load balancing using ingress out-of-the-box.

"},{"location":"labs/kubernetes/ingress-iks/#prerequisites","title":"Prerequisites","text":"
  • Free IBM Kubernetes Cluster (IKS) - upgrade your account from Lite plan to create one. In the example commands, we'll assume that this cluster is named mycluster
  • kubectl - match your cluster API version
  • Log in to IBM Cloud and configure kubectl using the ibmcloud ks cluster config --cluster mycluster command
"},{"location":"labs/kubernetes/ingress-iks/#components","title":"Components","text":"

On the IKS cluster, you will install helm charts for a nginx ingress controller from NGINX. This lab already provides the templated yaml files so there is no need to use helm cli.

"},{"location":"labs/kubernetes/ingress-iks/#set-up-the-ingress-controller","title":"Set up the ingress controller","text":"

Only do this on a free IKS instance These steps assume facts that only apply to free IKS instances:

  • a single worker where the cluster administrator can create pods that bind to host ports
  • no pre-existing ingress controller or application load balancer

Using the following steps with a paid instance can cause issues. See the IBM Cloud containers documentation for information on exposing applications with the ingress/alb services for paid clusters. You have been warned

  1. Install the NGINX ingress controller with helm using a daemonset and no service resource (which will result in a single pod that binds to ports 80 and 443 on the worker node and will skip creation of a ClusterIP, LoadBalancer, or NodePort for the daemonset).

    kubectl apply -f https://cloudnative101.dev/yamls/ingress-controller/iks-ingress-v1.7.1.yaml\n

  2. You can use free domain .nip.io to get a domain name using one of the IP Address of your worker nodes. Run this command to set your DOMAIN

    export DOMAIN=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type==\"ExternalIP\")].address}').nip.io\necho $DOMAIN\n

  3. You can test the ingress controller using the $DOMAIN:

    curl -I http://$DOMAIN\n
    HTTP/1.1 404 Not Found\nServer: nginx/1.17.10\n...\n

    A 404 is expected at this point because unlike the kubernetes nginx ingress, the NGINX version of the ingress controller does not create a default backend deployment.

  4. To use the ingress controller deploy a sample application, expose a service.

    kubectl create deployment web --image=bitnami/nginx\nkubectl expose deployment web --name=web --port 8080\n

  5. Now create an Ingress resource

    cat <<EOF | kubectl apply -f -\napiVersion: networking.k8s.io/v1beta1\nkind: Ingress\nmetadata:\n  name: web\n  labels:\n    app: web\nspec:\n  rules:\n    - host: web.$DOMAIN\n      http:\n        paths:\n          - path: /\n            backend:\n              serviceName: web\n              servicePort: 8080\nEOF\necho \"Access your web app at http://web.$DOMAIN\"\n

  6. List the created ingress

    kubectl get ingress web\n

  7. Access your web application

    curl http://web.$DOMAIN\n
    The output prints the html
    <p><em>Thank you for using nginx.</em></p>\n

  8. Delete all the resources created

    kubectl delete deployment,svc,ingress -l app=web\n

"},{"location":"labs/kubernetes/lab1/","title":"Kubernetes Lab 1 - Pod Creation","text":""},{"location":"labs/kubernetes/lab1/#problem","title":"Problem","text":"
  • Write a pod definition named yoda-service-pod.yml. Then create a pod in the cluster using this definition to make sure it works.

The specifications of this pod are as follows:

  • Use the bitnami/nginx container image.
  • The container needs a containerPort of 80.
  • Set the command to run as nginx
  • Pass in the -g daemon off; -q args to run nginx in quiet mode.
  • Create the pod in the web namespace.
"},{"location":"labs/kubernetes/lab1/#verification","title":"Verification","text":"

When you have completed this lab, use the following commands to validate your solution. The 'get pods' command will list the pods in the web namespace, and the 'describe pod' command will show the pod's configuration details.

kubectl get pods -n web kubectl describe pod nginx -n web

"},{"location":"labs/kubernetes/lab1/solution/","title":"Kubernetes Lab 1 - Pod Creation","text":""},{"location":"labs/kubernetes/lab1/solution/#solution","title":"Solution","text":"
apiVersion: v1\nkind: Pod\nmetadata:\n  name: nginx\n  namespace: web\nspec:\n  containers:\n  - name: nginx\n    image: nginx\n    command: [\"nginx\"]\n    args: [\"-g\", \"daemon off;\", \"-q\"]\n    ports:\n    - containerPort: 80\n
"},{"location":"labs/kubernetes/lab10/","title":"Kubernetes Lab 10 - Persistent Volumes","text":""},{"location":"labs/kubernetes/lab10/#problem","title":"Problem","text":"

The death star plans can't be lost no matter what happens so we need to make sure we protect them at all costs.

In order to do that you will need to do the following:

Create a PersistentVolume:

  • The PersistentVolume should be named postgresql-pv.

  • The volume needs a capacity of 1Gi.

  • Use a storageClassName of localdisk.

  • Use the accessMode ReadWriteOnce.

  • Store the data locally on the node using a hostPath volume at the location /mnt/data.

Create a PersistentVolumeClaim:

  • The PersistentVolumeClaim should be named postgresql-pv-claim.

  • Set a resource request on the claim for 500Mi of storage.

  • Use the same storageClassName and accessModes as the PersistentVolume so that this claim can bind to the PersistentVolume.

Create a Postgresql Pod configured to use the PersistentVolumeClaim: - The Pod should be named postgresql-pod.

  • Use the image bitnami/postgresql.

  • Expose the containerPort 5432.

  • Set an environment variable called MYSQL_ROOT_PASSWORD with the value password.

  • Add the PersistentVolumeClaim as a volume and mount it to the container at the path /bitnami/postgresql/.

"},{"location":"labs/kubernetes/lab10/solution/","title":"Kubernetes Lab 10 - Persistent Volumes","text":""},{"location":"labs/kubernetes/lab10/solution/#solution","title":"Solution","text":"
apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: postgresql-pv\nspec:\n  storageClassName: localdisk\n  capacity:\n    storage: 1Gi\n  accessModes:\n    - ReadWriteOnce\n  hostPath:\n    path: \"/mnt/data\"\n
apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: postgresql-pv-claim\nspec:\n  storageClassName: localdisk\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 500Mi\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: postgresql-pod\nspec:\n  containers:\n  - name: postgresql\n    image: bitnami/postgresql\n    ports:\n    - containerPort: 5432\n    env:\n    - name: MYSQL_ROOT_PASSWORD\n      value: password\n    volumeMounts:\n    - name: sql-storage\n      mountPath: /bitnami/postgresql/\n  volumes:\n  - name: sql-storage\n    persistentVolumeClaim:\n      claimName: postgresql-pv-claim\n

verify via ls /mnt/data on node

"},{"location":"labs/kubernetes/lab2/","title":"Kubernetes Lab 2 - Pod Configuration","text":""},{"location":"labs/kubernetes/lab2/#problem","title":"Problem","text":"
  • Create a pod definition named yoda-service-pod.yml, and then create a pod in the cluster using this definition to make sure it works.

The specifications are as follows:

  • The current image for the container is bitnami/nginx. You do not need a custom command or args.
  • There is some configuration data the container will need:
    • yoda.baby.power=100000000
    • yoda.strength=10
  • It will expect to find this data in a file at /etc/yoda-service/yoda.cfg. Store the configuration data in a ConfigMap called yoda-service-config and provide it to the container as a mounted volume.
  • The container should expect to use 64Mi of memory and 250m CPU (use resource requests).
  • The container should be limited to 128Mi of memory and 500m CPU (use resource limits).
  • The container needs access to a database password in order to authenticate with a backend database server. The password is 0penSh1ftRul3s!. It should be stored as a Kubernetes secret called yoda-db-password and passed to the container as an environment variable called DB_PASSWORD.
  • The container will need to access the Kubernetes API using the ServiceAccount yoda-svc. Create the service account if it doesn't already exist, and configure the pod to use it.
"},{"location":"labs/kubernetes/lab2/#verification","title":"Verification","text":"

To verify your setup is complete, check /etc/yoda-service for the yoda.cfg file and use the cat command to check its contents.

kubectl exec -it yoda-service /bin/bash\ncd /etc/yoda-service\ncat yoda.cfg\n
"},{"location":"labs/kubernetes/lab2/solution/","title":"Kubernetes Lab 2 - Pod Configuration","text":""},{"location":"labs/kubernetes/lab2/solution/#solution","title":"Solution","text":"
apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: yoda-service-config\ndata:\n  yoda.cfg: |-\n    yoda.baby.power=100000000\n    yoda.strength=10\n
apiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: yoda-svc\n
apiVersion: v1\nkind: Secret\nmetadata:\n  name: yoda-db-password\nstringData:\n  password: 0penSh1ftRul3s!\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: yoda-service\nspec:\n  serviceAccountName: yoda-svc\n  containers:\n  - name: yoda-service\n    image: bitnami/nginx\n    volumeMounts:\n      - name: config-volume\n        mountPath: /etc/yoda-service\n    env:\n    - name: DB_PASSWORD\n      valueFrom:\n        secretKeyRef:\n          name: yoda-db-password\n          key: password\n    resources:\n      requests:\n        memory: \"64Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n  volumes:\n  - name: config-volume\n    configMap:\n      name: yoda-service-config\n
"},{"location":"labs/kubernetes/lab3/","title":"Kubernetes Lab 3 - Manage Multiple Containers","text":""},{"location":"labs/kubernetes/lab3/#problem","title":"Problem","text":"

This service has already been packaged into a container image, but there is one special requirement: - The legacy app is hard-coded to only serve content on port 8989, but the team wants to be able to access the service using the standard port 80.

Your task is to build a Kubernetes pod that runs this legacy container and uses the ambassador design pattern to expose access to the service on port 80.

This setup will need to meet the following specifications:

  • The pod should have the name vader-service.
  • The vader-service pod should have a container that runs the legacy vader service image: ibmcase/millennium-falcon:1.
  • The vader-service pod should have an ambassador container that runs the haproxy:1.7 image and proxies incoming traffic on port 80 to the legacy service on port 8989 (the HAProxy configuration for this is provided below).
  • Port 80 should be exposed as a containerPort.

Note: You do not need to expose port 8989

  • The HAProxy configuration should be stored in a ConfigMap called vader-service-ambassador-config.
  • The HAProxy config should be provided to the ambassador container using a volume mount that places the data from the ConfigMap in a file at /usr/local/etc/haproxy/haproxy.cfg. haproxy.cfg should contain the following configuration data:
global\n    daemon\n    maxconn 256\n\ndefaults\n    mode http\n    timeout connect 5000ms\n    timeout client 50000ms\n    timeout server 50000ms\n\nlisten http-in\n    bind *:80\n    server server1 127.0.0.1:8989 maxconn 32\n

Once your pod is up and running, it's a good idea to test it to make sure you can access the service from within the cluster using port 80. In order to do this, you can create a busybox pod in the cluster, and then run a command to attempt to access the service from within the busybox pod.

Create a descriptor for the busybox pod called busybox.yml

apiVersion: v1\nkind: Pod\nmetadata:\n  name: busybox\nspec:\n  containers:\n  - name: myapp-container\n    image: radial/busyboxplus:curl\n    command: ['sh', '-c', 'while true; do sleep 3600; done']\n

Create the busybox testing pod.

kubectl apply -f busybox.yml\n

Use this command to access vader-service using port 80 from within the busybox pod.

kubectl exec busybox -- curl $(kubectl get pod vader-service -o=custom-columns=IP:.status.podIP --no-headers):80\n

If the service is working, you should get a message that the hyper drive of the millennium falcon needs repair.

Relevant Documentation: - Kubernetes Sidecar Logging Agent - Shared Volumes - Distributed System Toolkit Patterns

"},{"location":"labs/kubernetes/lab3/solution/","title":"Kubernetes Lab 3 - Manage Multiple Containers","text":""},{"location":"labs/kubernetes/lab3/solution/#solution","title":"Solution","text":"
apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: vader-service-ambassador-config\ndata:\n  haproxy.cfg: |-\n    global\n        daemon\n        maxconn 256\n\n    defaults\n        mode http\n        timeout connect 5000ms\n        timeout client 50000ms\n        timeout server 50000ms\n\n    listen http-in\n        bind *:80\n        server server1 127.0.0.1:8989 maxconn 32\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: vader-service\nspec:\n  containers:\n  - name: millennium-falcon\n    image: ibmcase/millennium-falcon:1\n  - name: haproxy-ambassador\n    image: haproxy:1.7\n    ports:\n    - containerPort: 80\n    volumeMounts:\n    - name: config-volume\n      mountPath: /usr/local/etc/haproxy\n  volumes:\n  - name: config-volume\n    configMap:\n      name: vader-service-ambassador-config\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: busybox\nspec:\n  containers:\n  - name: myapp-container\n    image: radial/busyboxplus:curl\n    command: ['sh', '-c', 'while true; do sleep 3600; done']\n
kubectl exec busybox -- curl $(kubectl get pod vader-service -o=jsonpath='{.status.podIP}'):80\n
"},{"location":"labs/kubernetes/lab4/","title":"Kubernetes Lab 4 - Probes","text":""},{"location":"labs/kubernetes/lab4/#container-health-issues","title":"Container Health Issues","text":"

The first issue is caused by application instances entering an unhealthy state and responding to user requests with error messages. Unfortunately, this state does not cause the container to stop, so the Kubernetes cluster is not able to detect this state and restart the container. Luckily, the application has an internal endpoint that can be used to detect whether or not it is healthy. This endpoint is /healthz on port 8080.

  • Your first task will be to create a probe to check this endpoint periodically.
  • If the endpoint returns an error or fails to respond, the probe will detect this and the cluster will restart the container.
"},{"location":"labs/kubernetes/lab4/#container-startup-issues","title":"Container Startup Issues","text":"

Another issue is caused by new pods when they are starting up. The application takes a few seconds after startup before it is ready to service requests. As a result, some users are getting error message during this brief time.

  • To fix this, you will need to create another probe. To detect whether the application is ready, the probe should simply make a request to the root endpoint, /ready, on port 8080. If this request succeeds, then the application is ready.

  • Also set a initial delay of 5 seconds for the probes.

Here is the Pod yaml file, add the probes, then create the pod in the cluster to test it.

apiVersion: v1\nkind: Pod\nmetadata:\n  name: energy-shield-service\nspec:\n  containers:\n  - name: energy-shield\n    image: ibmcase/energy-shield:1\n
"},{"location":"labs/kubernetes/lab4/solution/","title":"Kubernetes Lab 4 - Probes","text":""},{"location":"labs/kubernetes/lab4/solution/#solution","title":"Solution","text":"
apiVersion: v1\nkind: Pod\nmetadata:\n  name: energy-shield-service\nspec:\n  containers:\n  - name: energy-shield\n    image: ibmcase/energy-shield:1\n    livenessProbe:\n      httpGet:\n        path: /healthz\n        port: 8080\n    readinessProbe:\n      httpGet:\n        path: /ready\n        port: 8080\n      initialDelaySeconds: 5\n
"},{"location":"labs/kubernetes/lab5/","title":"Kubernetes Lab 5 - Debugging","text":""},{"location":"labs/kubernetes/lab5/#problem","title":"Problem","text":"

The Hyper Drive isn't working and we need to find out why. Let's debug the hyper-drive deployment so that we can reach light speed again.

Here are some tips to help you solve the Hyper Drive:

  • Check the description of the deployment.
  • Get and save the logs of one of the broken pods.
  • Are the correct ports assigned.
  • Make sure your labels and selectors are correct.
  • Check to see if the Probes are correctly working.
  • To fix the deployment, save then modify the yaml file for redeployment.

Reset the environment:

minikube delete\nminikube start\n

Setup the environment:

kubectl apply -f https://raw.githubusercontent.com/ibm-cloud-architecture/learning-cloudnative-101/master/lab-setup/lab-5-debug-k8s-setup.yaml\n

"},{"location":"labs/kubernetes/lab5/#validate","title":"Validate","text":"

Once you get the Hyper Drive working again. Verify it by checking the endpoints.

kubectl get ep hyper-drive\n
"},{"location":"labs/kubernetes/lab5/solution/","title":"Kubernetes Lab 5 - Debugging","text":""},{"location":"labs/kubernetes/lab5/solution/#solution","title":"Solution","text":"

Check the STATUS column for pods that are not Ready

    kubectl get pods --all-namespaces\n

Check the description of the deployment

kubectl describe deployment hyper-drive\n
Save logs for a broken pod

kubectl logs <pod name> -n <namespace> > /home/cloud_user/debug/broken-pod-logs.log\n

In the description you will see the following is wrong: - Selector and Label names do not match. - The Probe is TCP instead of HTTP Get. - The Service Port is 80 instead of 8080.

To fix probe, can't kubectl edit, need to delete and recreate the deployment

kubectl get deployment <deployment name> -n <namespace> -o yaml --export > hyper-drive.yml\n

Delete pod

kubectl delete deployment <deployment name> -n <namespace>\n
Can also use kubectl replace

Edit yaml, and apply

kubectl apply -f hyper-drive.yml -n <namespace>\n

Verify

kubectl get deployment <deployment name> -n <namespace>\n

"},{"location":"labs/kubernetes/lab6/","title":"Kubernetes Lab 6 - Rolling Updates","text":""},{"location":"labs/kubernetes/lab6/#problem","title":"Problem","text":"

Your company's developers have just finished developing a new version of their jedi-themed mobile game. They are ready to update the backend services that are running in your Kubernetes cluster. There is a deployment in the cluster managing the replicas for this application. The deployment is called jedi-deployment. You have been asked to update the image for the container named jedi-ws in this deployment template to a new version, bitnamy/nginx:1.18.1.

After you have updated the image using a rolling update, check on the status of the update to make sure it is working. If it is not working, perform a rollback to the previous state.

Setup environment

kubectl apply -f https://gist.githubusercontent.com/csantanapr/87df4292e94441617707dae5de488cf4/raw/cb515f7bae77a3f0e76fdc7f6aa0f4e89cc5fec7/lab-6-rolling-updates-setup.yaml\n

"},{"location":"labs/kubernetes/lab6/solution/","title":"Kubernetes Lab 6 - Rolling Updates","text":""},{"location":"labs/kubernetes/lab6/solution/#solution","title":"Solution","text":"

Update the deployment to the new version like so:

kubectl set image deployment/jedi-deployment jedi-ws=bitnamy/nginx:1.18.1 --record\n

Check the progress of the rolling update:

kubectl rollout status deployment/jedi-deployment\n

In another terminal window

kubectl get pods -w\n

Get a list of previous revisions.

kubectl rollout history deployment/jedi-deployment\n

Undo the last revision.

kubectl rollout undo deployment/jedi-deployment\n

Check the status of the rollout.

kubectl rollout status deployment/jedi-deployment\n

"},{"location":"labs/kubernetes/lab7/","title":"Kubernetes Lab 7 - Cron Jobs","text":""},{"location":"labs/kubernetes/lab7/#problem","title":"Problem","text":"

Your commander has a simple data process that is run periodically to check status. They would like to stop doing this manually in order to save time, so you have been asked to implement a cron job in the Kubernetes cluster to run this process. - Create a cron job called xwing-cronjob using the ibmcase/xwing-status:1.0 image. - Have the job run every second minute with the following cron expression: */2 * * * *. - Pass the argument /usr/sbin/xwing-status.sh to the container.

"},{"location":"labs/kubernetes/lab7/#verification","title":"Verification","text":"
  • Run kubectl get cronjobs.batch and check the LAST-SCHEDULE column to see the last time it ran
  • From a bash shell, run the following to see the logs for all jobs:
jobs=( $(kubectl get jobs --no-headers -o custom-columns=\":metadata.name\") )\necho -e \"Job \\t\\t\\t\\t Pod \\t\\t\\t\\t\\tLog\"\nfor job in \"${jobs[@]}\"\ndo\n   pod=$(kubectl get pods -l job-name=$job --no-headers -o custom-columns=\":metadata.name\")\n   echo -en \"$job \\t $pod \\t\"\n   kubectl logs $pod\ndone\n
"},{"location":"labs/kubernetes/lab7/solution/","title":"Kubernetes Lab 7 - Cron Jobs","text":""},{"location":"labs/kubernetes/lab7/solution/#solution","title":"Solution","text":"
apiVersion: batch/v1beta1\nkind: CronJob\nmetadata:\n  name: xwing-cronjob\nspec:\n  schedule: \"*/2 * * * *\"\n  jobTemplate:\n    spec:\n      template:\n        spec:\n          containers:\n          - name: xwing-status\n            image: ibmcase/xwing-status:1.0\n            args:\n            - /usr/sbin/xwing-status.sh\n          restartPolicy: OnFailure\n
kubectl get cronjob xwing-cronjob\n
"},{"location":"labs/kubernetes/lab8/","title":"Kubernetes Lab 8 - Services","text":""},{"location":"labs/kubernetes/lab8/#problem","title":"Problem","text":"

We have a jedi-deployment and yoda-deployment that need to communicate with others. The jedi needs to talk to the world(outside the cluster), while yoda only needs to talk to jedi council(others in the cluster).

"},{"location":"labs/kubernetes/lab8/#your-task","title":"Your Task","text":"
  • Examine the two deployments, and create two services that meet the following criteria:

jedi-svc - The service name is jedi-svc. - The service exposes the pod replicas managed by the deployment named jedi-deployment. - The service listens on port 80 and its targetPort matches the port exposed by the pods. - The service type is NodePort.

yoda-svc - The service name is yoda-svc. - The service exposes the pod replicas managed by the deployment named yoda-deployment. - The service listens on port 80 and its targetPort matches the port exposed by the pods. - The service type is ClusterIP.

"},{"location":"labs/kubernetes/lab8/#setup-environment","title":"Setup environment:","text":"
kubectl apply -f https://gist.githubusercontent.com/csantanapr/87df4292e94441617707dae5de488cf4/raw/cb515f7bae77a3f0e76fdc7f6aa0f4e89cc5fec7/lab-8-service-setup.yaml\n
"},{"location":"labs/kubernetes/lab8/solution/","title":"Kubernetes Lab 8 - Services","text":""},{"location":"labs/kubernetes/lab8/solution/#solution","title":"Solution","text":"
apiVersion: v1\nkind: Service\nmetadata:\n  name: jedi-svc\nspec:\n  type: NodePort\n  selector:\n    app: jedi\n  ports:\n  - protocol: TCP\n    port: 80\n    targetPort: 8080\n
apiVersion: v1\nkind: Service\nmetadata:\n  name: yoda-svc\nspec:\n  type: ClusterIP\n  selector:\n    app: yoda\n  ports:\n  - protocol: TCP\n    port: 80\n    targetPort: 8080\n
"},{"location":"labs/kubernetes/lab9/","title":"Kubernetes Lab 9 - Network Policies","text":""},{"location":"labs/kubernetes/lab9/#problem","title":"Problem","text":"

Setup minikube

minikube start --network-plugin=cni\nkubectl apply -f https://docs.projectcalico.org/v3.9/manifests/calico.yaml\nkubectl -n kube-system set env daemonset/calico-node FELIX_IGNORELOOSERPF=true\nkubectl -n kube-system get pods | grep calico-node\n

Create secured pod

apiVersion: v1\nkind: Pod\nmetadata:\n  name: network-policy-secure-pod\n  labels:\n    app: secure-app\nspec:\n  containers:\n  - name: nginx\n    image: bitnami/nginx\n    ports:\n    - containerPort: 8080\n

Create client pod

apiVersion: v1\nkind: Pod\nmetadata:\n  name: network-policy-client-pod\nspec:\n  containers:\n  - name: busybox\n    image: radial/busyboxplus:curl\n    command: [\"/bin/sh\", \"-c\", \"while true; do sleep 3600; done\"]\n

Create a policy to allow only client pods with label allow-access: \"true\" to access secure pod

"},{"location":"labs/kubernetes/lab9/solution/","title":"Kubernetes Lab 9 - Network Policies","text":""},{"location":"labs/kubernetes/lab9/solution/#solution","title":"Solution","text":"
apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: my-network-policy\nspec:\n  podSelector:\n    matchLabels:\n      app: secure-app\n  policyTypes:\n  - Ingress\n  ingress:\n  - from:\n    - podSelector:\n        matchLabels:\n          allow-access: \"true\"\n
"},{"location":"openshift/","title":"Kubernetes & OpenShift Overview","text":""},{"location":"openshift/#introduction","title":"Introduction","text":"

Kubernetes is an open source container orchestration platform that automates deployment, management and scaling of applications. Learn how Kubernetes enables cost-effective cloud native development.

"},{"location":"openshift/#what-is-kubernetes","title":"What is Kubernetes?","text":"

Kubernetes\u2014also known as \u2018k8s\u2019 or \u2018kube\u2019\u2014is a container orchestration platform for scheduling and automating the deployment, management, and scaling of containerized applications.

Kubernetes was first developed by engineers at Google before being open sourced in 2014. It is a descendant of \u2018Borg,\u2019 a container orchestration platform used internally at Google. (Kubernetes is Greek for helmsman or pilot, hence the helm in the Kubernetes logo.)

Today, Kubernetes and the broader container ecosystem are maturing into a general-purpose computing platform and ecosystem that rivals\u2014if not surpasses\u2014virtual machines (VMs) as the basic building blocks of modern cloud infrastructure and applications. This ecosystem enables organizations to deliver a high-productivity Platform-as-a-Service (PaaS) that addresses multiple infrastructure- and operations-related tasks and issues surrounding cloud native development so that development teams can focus solely on coding and innovation.

https://www.ibm.com/cloud/learn/kubernetes

"},{"location":"openshift/#presentations","title":"Presentations","text":"

Kubernetes Overview

"},{"location":"openshift/#predictable-demands-pattern","title":"Predictable Demands Pattern","text":"

An application's performance, efficiency, and behaviors are reliant upon its ability to have the appropriate allocation of resources. The Predictable Demands pattern is based on declaring the dependencies and resources needed by a given application. The scheduler will prioritize an application with a defined set of resources and dependencies since it can better manage the workload across nodes in the cluster. Each application has a different set of dependencies which we will touch on next.

"},{"location":"openshift/#runtime-dependencies","title":"Runtime Dependencies","text":"

One of the most common runtime dependencies is the exposure of a container's specific port through hostPort. Different applications can specify the same port through hostPort which reserves the port on each node in the cluster for the specific container. This declaration prevents multiple containers with the same hostPort from being deployed on the same node in the cluster and restricts the scale of pods to the number of nodes you have in the cluster.

Another runtime dependency is file storage for saving the application state. Kubernetes offers Pod-level storage utilities that are capable of surviving container restarts. Applications needing to read or write to these storage mechanisms will require nodes that provide the type of volume required by the application. If there are no nodes available with the required volume type, then the pod will not be scheduled at all.

A different kind of dependency is configurations. ConfigMaps are used by Kubernetes to strategically plan out how an application consumes its settings through either environment variables or the filesystem. Secrets are consumed the same way as a ConfigMap in Kubernetes. Secrets are a more secure way to distribute environment-specific configurations to containers within the pod.

"},{"location":"openshift/#resource-profiles","title":"Resource Profiles","text":"

Resource Profiles are definitions for the compute resources required for a container. Resources are categorized in two ways, compressible and incompressible. Compressible resources include resources that can be throttled such as CPU or network bandwidth. Incompressible represents resources that can't be throttled such as memory where there is no other way to release the allocated resource other than killing the container. The difference between compressible and incompressible is very important when it comes to planning the deployment of pods and containers since the resource allocation can be affected by the limits of each.

Every application needs to have a specified minimum and maximum amount of resources that are needed. The minimum amount is called \"requests\" and the maximum is the \"limits\". The scheduler uses the requests to determine the assignment of pods to nodes, ensuring that the node will have enough capacity to accommodate the pod and all of its containers' required resources. An example of defined resource limits is below:

Different levels of Quality of Service (QoS) are offered based on the specified requests and limits.

  1. Quality of Service Levels Best Effort;; Lowest priority pod with no requests or limits set for its containers. These pods will be the first of any pods killed if resources run low. Burstable;; Limits and requests are defined but they are not equal. The pod will use the minimum amount of resources, but will consume more if needed up to the limit. If the needed resources become scarce then these pods will be killed if no Best Effort pods are left. Guaranteed;; Highest priority pods with an equal amount of requests and limits. These pods will be the last to be killed if resources run low and no Best Effort or Burstable pods are left.
"},{"location":"openshift/#pod-priority","title":"Pod Priority","text":"

The priority of pods can be defined through a PriorityClass object. The PriorityClass object allows developers to indicate the importance of a pod relative to the other pods in the cluster. The higher the priority number then the higher the priority of the pod. The scheduler looks at a pods priorityClassName to populate the priority of new pods. As pods are being placed in the scheduling queue for deployment, the scheduler orders them from highest to lowest.

Another key feature for pod priority is the Preemption feature. The Preemption feature occurs when there are no nodes with enough capacity to place a pod. If this occurs the scheduler can preempt (remove) lower-priority Pods from nodes to free up resources and place Pods with higher priority. This effectively allows system administrators the ability to control which critical pods get top priority for resources in the cluster as well as controlling which critical workloads are able to be run on the cluster first. If a pod can not be scheduled due to constraints it will continue on with lower-priority nodes.

Pod Priority should be used with caution, for it gives users control over the Kubernetes scheduler and the ability to place or kill pods in ways that may interrupt the cluster's critical functions. New pods with higher priority than others can quickly evict pods with lower priority that may be critical to a container's performance. ResourceQuota and PodDisruptionBudget are two tools that help prevent this from happening; read more here.

"},{"location":"openshift/#declarative-deployment-pattern","title":"Declarative Deployment Pattern","text":"

With a growing number of microservices, reliance on an updating process for the services has become ever more important. Upgrading services is usually accompanied with some downtime for users or an increase in resource usage. Both of these can lead to an error effecting the performance of the application making the release process a bottleneck.

A way to combat this issue in Kubernetes is through the use of Deployments. There are different approaches to the updating process that we will cover below. Any of these approaches can be put to use in order to save time for developers during their release cycles which can last from a few minutes to a few months.

"},{"location":"openshift/#rolling-deployment","title":"Rolling Deployment","text":"

A Rolling Deployment ensures that there is no downtime during the update process. Kubernetes creates a new ReplicaSet for the new version of the service to be rolled out. From there Kubernetes creates set of pods of the new version while leaving the old pods running. Once the new pods are all up and running they will replace the old pods and become the primary pods users access.

The upside to this approach is that there is no downtime and the deployment is handled by kubernetes through a deployment like the one below. The downside is with two sets of pods running at one time there is a higher usage of resources that may lead to performance issues for users.

"},{"location":"openshift/#fixed-deployment","title":"Fixed Deployment","text":"

A Fixed Deployment uses the Recreate strategy which sets the maxUnavailable setting to the number of declared replicas. This in effect starts the new versions of the pods as the old versions are being killed. The starting and stopping of containers does create a little bit of downtime for customers while the starting and stopping is taking place, but the positive side is the users will only have to handle one version at a time.

"},{"location":"openshift/#blue-green-release","title":"Blue-Green Release","text":"

A Blue-Green Release involves a manual process of creating a second deployment of pods with the newest version of the application running as well as keeping the old version of pods running in the cluster. Once the new pods are up and running properly the administrator shifts the traffic over to the new pods. Below is a diagram showing both versions up and running with the traffic going to the newer (green) pods.

The downfall to this approach is the use of resources with two separate groups of pods running at the same time which could cause performance issues or complications. However, the advantage of this approach is users only experience one version at a time and it's easy to quickly switch back to the old version with no downtime if an issue arises with the newer version.

"},{"location":"openshift/#canary-release","title":"Canary Release","text":"

A Canary Release involves only standing up one pod of the new application code and shifting only a limited amount of new user traffic to that pod. This approach reduces the number of people exposed to the new service allowing the administrator to see how the new version is performing. Once the team feels comfortable with the performance of the new service then more pods can be stood up to replace the old pods. An advantage to this approach is no downtime with any of the services as the new service is being scaled.

"},{"location":"openshift/#health-probe-pattern","title":"Health Probe Pattern","text":"

The Health Probe pattern revolves around the health of applications being communicated to Kubernetes. To be fully automatable, cloud applications must be highly observable in order for Kubernetes to know which applications are up and ready to receive traffic and which cannot. Kubernetes can use that information for traffic direction, self-healing, and to achieve the desired state of the application.

"},{"location":"openshift/#process-health-checks","title":"Process Health Checks","text":"

The simplest health check in kubernetes is the Process Health Check. Kubernetes simply probes the application's processes to see if they are running or not. The process check tells kubernetes when a process for an application needs to be restarted or shut down in the case of a failure.

"},{"location":"openshift/#liveness-probes","title":"Liveness Probes","text":"

A Liveness Probe is performed by the Kubernetes Kubelet agent and asks the container to confirm its health. A simple process check can return that the container is healthy, but the container may not be performing correctly for its users. The liveness probe addresses this issue by asking the container for its health from outside of the container itself. If a failure is found it may require that the container be restarted to get back to normal health. A liveness probe can perform the following actions to check health:

  • HTTP GET and expects a success which is code 200-399.
  • A TCP Socket Probe and expects a successful connection.
  • An Exec Probe which executes a command and expects a successful exit code (0).

The action chosen to be performed for testing depends on the nature of the application and which action fits best. Always keep in mind that a failing health check results in a restart of the container from Kubernetes, so make sure the right health check is in place if the underlying issue can't be fixed.

"},{"location":"openshift/#readiness-probes","title":"Readiness Probes","text":"

A Readiness Probe is very similar to a Liveness probe, but the resulting action to a failed Readiness probe is different. When a liveness probe fails the container is restarted and, in some scenarios, a simple restart won't fix the issue, which is where a readiness probe comes in. A failed readiness probe won't restart the container but will disconnect it from the traffic endpoint. Removing a container from traffic allows it to get up and running smoothly before being tossed into service unready to handle requests from users. Readiness probes give an application time to catch up and make itself ready again to handle more traffic versus shutting down completely and simply creating a new pod. In most cases, liveness and readiness probes are run together on the same application to make sure that the container has time to get up and running properly as well as stays healthy enough to handle the traffic.

"},{"location":"openshift/#managed-lifecycle-pattern","title":"Managed Lifecycle Pattern","text":"

The Managed Lifecycle pattern describes how containers need to adapt their lifecycles based on the events that are communicated from a managing platform such as Kubernetes. Containers do not have control of their own lifecycles. It's the managing platforms that allow them to live or die, get traffic or have none, etc. This pattern covers how the different events can affect those lifecycle decisions.

"},{"location":"openshift/#sigterm","title":"SIGTERM","text":"

The SIGTERM is a signal that is sent from the managing platform to a container or pod that instructs the pod or container to shutdown or restart. This signal can be sent due to a failed liveness test or a failure inside the container. SIGTERM allows the container to cleanly and properly shut itself down, versus SIGKILL, which we will get to next. Once received, the application will shutdown as quickly as it can, allowing other processes to stop properly and cleaning up other files. Each application will have a different shutdown time based on the tasks needed to be done.

"},{"location":"openshift/#sigkill","title":"SIGKILL","text":"

SIGKILL is a signal sent to a container or pod forcing it to shutdown. A SIGKILL is normally sent after the SIGTERM signal. There is a default 30 second grace period between the time that SIGTERM is sent to the application and SIGKILL is sent. The grace period can be adjusted for each pod using the .spec.terminationGracePeriodSeconds field. The overall goal for containerized applications should be aimed to have designed and implemented quick startup and shutdown operations.

"},{"location":"openshift/#poststart","title":"postStart","text":"

The postStart hook is a command that is run after the creation of a container and begins asynchronously with the container's primary process. PostStart is put in place in order to give the container time to warm up and check itself during startup. During the postStart loop the container will be labeled in \"pending\" mode in kubernetes while running through its initial processes. If the postStart function errors out it will do so with a nonzero exit code and the container process will be killed by Kubernetes. Careful planning must be done when deciding what logic goes into the postStart function because if it fails the container will also fail to start. Both postStart and preStop have two handler types that they run:

  • exec: Runs a command directly in the container.

  • httpGet: Executes an HTTP GET request against an opened port on the pod container.

"},{"location":"openshift/#prestop","title":"preStop","text":"

The preStop hook is a call that blocks a container from terminating too quickly and makes sure the container has a graceful shutdown. The preStop call must finish before the container is deleted by the container runtime. The preStop signal does not stop the container from being deleted completely, it is only an alternative to a SIGTERM signal for a graceful shutdown.

"},{"location":"openshift/configuration/","title":"Container Configuration","text":""},{"location":"openshift/configuration/#command-and-argument","title":"Command and Argument","text":"

When you create a Pod, you can define a command and arguments for the containers that run in the Pod.

The command and arguments that you define in the configuration file override the default command and arguments provided by the container image

Dockerfile vs Kubernetes Dockerfile Entrypoint -> k8s command Dockerfile CMD -> k8s args

"},{"location":"openshift/configuration/#ports","title":"Ports","text":"

When you create a Pod, you can specify the port number the container exposes. As a best practice it is good to give the port a name, so that a service can specify its targetPort by name reference.

"},{"location":"openshift/configuration/#environment-variable","title":"Environment Variable","text":"

When you create a Pod, you can set environment variables for the containers that run in the Pod. To set environment variables, include the env or envFrom field in the container configuration

A Pod can use environment variables to expose information about itself to Containers running in the Pod. Environment variables can expose Pod fields and Container fields

"},{"location":"openshift/configuration/#resources","title":"Resources","text":"OpenShift & Kubernetes

Container Commands

Environment Variables

Pod Exposing

"},{"location":"openshift/configuration/#references","title":"References","text":"
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-cmd-pod\nspec:\n  containers:\n  - name: myapp-container\n    image: busybox\n    command: ['echo']\n  restartPolicy: Never\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-arg-pod\nspec:\n  containers:\n  - name: myapp-container\n    image: busybox\n    command: ['echo']\n    args: ['Hello World']\n  restartPolicy: Never\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-port-pod\nspec:\n  containers:\n  - name: myapp-container\n    image: bitnami/nginx\n    ports:\n    - containerPort: 8080\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-env-pod\nspec:\n  restartPolicy: Never\n  containers:\n  - name: c\n    image: busybox\n    env:\n    - name: DEMO_GREETING\n      value: \"Hello from the environment\"\n    command: [\"echo\"]\n    args: [\"$(DEMO_GREETING)\"]\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-inter-pod\n  labels:\n    app: jedi\nspec:\n  restartPolicy: Never\n  containers:\n    - name: myapp\n      image: bitnami/nginx\n      ports:\n        - containerPort: 8080\n          name: http\n      env:\n        - name: MY_NODE_NAME\n          valueFrom:\n            fieldRef:\n              fieldPath: spec.nodeName\n        - name: MY_POD_NAME\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.name\n        - name: MY_POD_IP\n          valueFrom:\n            fieldRef:\n              fieldPath: status.podIP\n      command: [\"echo\"]\n      args: [\"$(MY_NODE_NAME) $(MY_POD_NAME) $(MY_POD_IP)\"]\n
"},{"location":"openshift/configuration/#resource-requirements","title":"Resource Requirements","text":"

When you specify a Pod, you can optionally specify how much CPU and memory (RAM) each Container needs. When Containers have resource requests specified, the scheduler can make better decisions about which nodes to place Pods on.

CPU and memory are each a resource type. A resource type has a base unit. CPU is specified in units of cores, and memory is specified in units of bytes.

"},{"location":"openshift/configuration/#resources_1","title":"Resources","text":"OpenShift & Kubernetes

Compute Resources

Memory Management

"},{"location":"openshift/configuration/#references_1","title":"References","text":"
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  containers:\n  - name: my-app\n    image: bitnami/nginx\n    ports:\n      - containerPort: 8080\n    resources:\n      requests:\n        memory: \"64Mi\"\n        cpu: \"250m\"\n      limits:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n

Namespaced defaults mem

apiVersion: v1\nkind: LimitRange\nmetadata:\n  name: mem-limit-range\nspec:\n  limits:\n  - default:\n      memory: 512Mi\n    defaultRequest:\n      memory: 256Mi\n    type: Container\n

Namespaced defaults mem

apiVersion: v1\nkind: LimitRange\nmetadata:\n  name: cpu-limit-range\nspec:\n  limits:\n  - default:\n      cpu: 1\n    defaultRequest:\n      cpu: 0.5\n    type: Container\n

"},{"location":"openshift/configuration/#activities","title":"Activities","text":"Task Description Link *** Try It Yourself *** Pod Configuration Configure a pod to meet compute resource requirements. Pod Configuration"},{"location":"openshift/configuration/config-map/","title":"Config Maps","text":"

ConfigMaps allow you to decouple configuration artifacts from image content to keep containerized applications portable.

You can use data from a ConfigMap in 3 different ways. - As a single environment variable specific to a single key - As a set of environment variables from all keys - As a set of files, each key represented by a file on a mounted volume

"},{"location":"openshift/configuration/config-map/#resources","title":"Resources","text":"OpenShiftKubernetes

Mapping Volumes

ConfigMaps

"},{"location":"openshift/configuration/config-map/#references","title":"References","text":"
apiVersion: v1\nkind: ConfigMap\nmetadata:\n   name: my-cm\ndata:\n   color: blue\n   location: naboo\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  restartPolicy: Never\n  containers:\n    - name: myapp\n      image: busybox\n      command: [\"echo\"]\n      args: [\"color is $(MY_VAR)\"]\n      env:\n        - name: MY_VAR\n          valueFrom:\n            configMapKeyRef:\n              name: my-cm\n              key: color\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  restartPolicy: Never\n  containers:\n    - name: myapp\n      image: busybox\n      command:\n        [\n          \"sh\",\n          \"-c\",\n          \"ls -l /etc/config; echo located at $(cat /etc/config/location)\",\n        ]\n      volumeMounts:\n        - name: config-volume\n          mountPath: /etc/config\n  volumes:\n    - name: config-volume\n      configMap:\n        name: my-cm\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  restartPolicy: Never\n  containers:\n    - name: myapp\n      image: busybox\n      command: [\"/bin/sh\", \"-c\", \"env | sort\"]\n      envFrom:\n        - configMapRef:\n            name: my-cm\n  restartPolicy: Never\n
"},{"location":"openshift/configuration/secrets/","title":"Secrets","text":"

Kubernetes secret objects let you store and manage sensitive information, such as passwords, OAuth tokens, and ssh keys. Putting this information in a secret is safer and more flexible than putting it verbatim in a Pod definition or in a container image.

A Secret is an object that contains a small amount of sensitive data such as a password, a token, or a key. Such information might otherwise be put in a Pod specification or in an image; putting it in a Secret object allows for more control over how it is used, and reduces the risk of accidental exposure.

"},{"location":"openshift/configuration/secrets/#resources","title":"Resources","text":"OpenShiftKubernetes
  • Image Pull Secrets

    Install mkdocs-material with pip and get up and running in minutes

    Getting started

  • It's just Markdown

    Focus on your content and generate a responsive and searchable static site

    Reference

Image Pull Secrets

Secret Commands

Secrets

Secret Distribution

"},{"location":"openshift/configuration/secrets/#references","title":"References","text":"
apiVersion: v1\nkind: Secret\nmetadata:\n  name: mysecret\ntype: Opaque\ndata:\n  username: YWRtaW4=\nstringData:\n  admin: administrator\n
apiVersion: v1\nkind: Secret\nmetadata:\n  name: mysecret-config\ntype: Opaque\nstringData:\n  config.yaml: |-\n    apiUrl: \"https://my.api.com/api/v1\"\n    username: token\n    password: thesecrettoken\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  containers:\n  - name: my-app\n    image: bitnami/nginx\n    ports:\n      - containerPort: 8080\n    env:\n      - name: SECRET_USERNAME\n        valueFrom:\n          secretKeyRef:\n            name: mysecret\n            key: username\n    envFrom:\n      - secretRef:\n          name: mysecret\n    volumeMounts:\n      - name: config\n        mountPath: \"/etc/secrets\"\n  volumes:\n    - name: config\n      secret:\n        secretName: mysecret-config\n
OpenShiftKubernetes

Create files needed for rest of example.

echo -n 'admin' > ./username.txt\necho -n '1f2d1e2e67df' > ./password.txt\n
Creating Secret from files.
oc create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt\n
Getting Secret
oc get secrets\n
Gets the Secret's Description.
oc describe secrets/db-user-pass\n

Create files needed for rest of example.

echo -n 'admin' > ./username.txt\necho -n '1f2d1e2e67df' > ./password.txt\n
** Creates the Secret from the files
kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt\n
Gets the Secret
kubectl get secrets\n
Gets the Secret's Description.**
kubectl describe secrets/db-user-pass\n

"},{"location":"openshift/configuration/security-contexts/","title":"Security Contexts","text":"

A security context defines privilege and access control settings for a Pod or Container.

To specify security settings for a Pod, include the securityContext field in the Pod specification. The securityContext field is a PodSecurityContext object. The security settings that you specify for a Pod apply to all Containers in the Pod.

"},{"location":"openshift/configuration/security-contexts/#resources","title":"Resources","text":"OpenShiftKubernetes

Managing Security Contexts

Security Contexts

"},{"location":"openshift/configuration/security-contexts/#references","title":"References","text":"

Setup minikube VM with users

minikube ssh\n
su -\n
echo \"container-user-0:x:2000:2000:-:/home/container-user-0:/bin/bash\" >> /etc/passwd\necho \"container-user-1:x:2001:2001:-:/home/container-user-1:/bin/bash\" >> /etc/passwd\necho \"container-group-0:x:3000:\" >>/etc/group\necho \"container-group-1:x:3001:\" >>/etc/group\nmkdir -p /etc/message/\necho \"Hello, World!\" | sudo tee -a /etc/message/message.txt\nchown 2000:3000 /etc/message/message.txt\nchmod 640 /etc/message/message.txt\n

Using this securityContext the container will be able to read the file /message/message.txt

apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-securitycontext-pod\nspec:\n  restartPolicy: Never\n  securityContext:\n    runAsUser: 2000\n    runAsGroup: 3000\n    fsGroup: 3000\n  containers:\n    - name: myapp-container\n      image: busybox\n      command: [\"sh\", \"-c\", \"cat /message/message.txt && sleep 3600\"]\n      volumeMounts:\n        - name: message-volume\n          mountPath: /message\n  volumes:\n    - name: message-volume\n      hostPath:\n        path: /etc/message\n

Using this securityContext the container should NOT be able to read the file /message/message.txt

apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-securitycontext-pod\nspec:\n  restartPolicy: Never\n  securityContext:\n    runAsUser: 2001\n    runAsGroup: 3001\n    fsGroup: 3001\n  containers:\n    - name: myapp-container\n      image: busybox\n      command: [\"sh\", \"-c\", \"cat /message/message.txt && sleep 3600\"]\n      volumeMounts:\n        - name: message-volume\n          mountPath: /message\n  volumes:\n    - name: message-volume\n      hostPath:\n        path: /etc/message\n
** Run to see the errors **

OpenShiftKubernetes Get Pod Logs
oc logs my-securitycontext-pod\n
Should return
cat: can't open '/message/message.txt': Permission denied\n
Get Pod Logs
kubectl logs my-securitycontext-pod\n
Should return
cat: can't open '/message/message.txt': Permission denied\n
"},{"location":"openshift/configuration/service-accounts/","title":"Service Accounts","text":"

A service account provides an identity for processes that run in a Pod.

When you (a human) access the cluster (for example, using kubectl), you are authenticated by the apiserver as a particular User Account (currently this is usually admin, unless your cluster administrator has customized your cluster). Processes in containers inside pods can also contact the apiserver. When they do, they are authenticated as a particular Service Account (for example, default).

User accounts are for humans. Service accounts are for processes, which run in pods.

User accounts are intended to be global. Names must be unique across all namespaces of a cluster, and a future user resource will not be namespaced. Service accounts are namespaced.

"},{"location":"openshift/configuration/service-accounts/#resources","title":"Resources","text":"OpenShiftKubernetes

Service Accounts

Using Service Accounts

Service Accounts

Service Account Configuration

"},{"location":"openshift/configuration/service-accounts/#references","title":"References","text":"
apiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: my-service-account\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  serviceAccountName: my-service-account\n  containers:\n  - name: my-app\n    image: bitnami/nginx\n    ports:\n      - containerPort: 8080\n
apiVersion: v1\nkind: Secret\nmetadata:\n  name: build-robot-secret\n  annotations:\n    kubernetes.io/service-account.name: my-service-account\ntype: kubernetes.io/service-account-token\n
OpenshiftKubernetes Create a Service Account
oc create sa <service_account_name>\n
View Service Account Details
oc describe sa <service_account_name>\n
Create a Service Account
kubectl create sa <service_account_name>\n
View Service Account Details
kubectl describe sa <service_account_name>\n
"},{"location":"openshift/core-concepts/","title":"Kubernetes API Primitives","text":"

Kubernetes API primitives, also known as Kubernetes objects, are the basic building blocks of any application running in Kubernetes

Examples:

  • Pod
  • Node
  • Service
  • ServiceAccount

Two primary members

  • Spec, desired state
  • Status, current state
"},{"location":"openshift/core-concepts/#resources","title":"Resources","text":"OpenShiftKubernetes

Pods

Nodes

Objects

Kube Basics

"},{"location":"openshift/core-concepts/#references","title":"References","text":"OpenShiftKubernetes List API-Resources
oc api-resources\n
List API-Resources
kubectl api-resources\n
"},{"location":"openshift/core-concepts/namespaces-projects/","title":"Projects/Namespaces","text":"

Namespaces are intended for use in environments with many users spread across multiple teams, or projects.

Namespaces provide a scope for names. Names of resources need to be unique within a namespace, but not across namespaces.

Namespaces are a way to divide cluster resources between multiple users (via resource quota).

It is not necessary to use multiple namespaces just to separate slightly different resources, such as different versions of the same software: use labels to distinguish resources within the same namespace. In practice namespaces are used to deploy different versions based on stages of the CICD pipeline (dev, test, stage, prod)

"},{"location":"openshift/core-concepts/namespaces-projects/#resources","title":"Resources","text":"OpenShiftKubernetes

Working with Projects

Creating Projects

Configure Project Creation

Namespaces

"},{"location":"openshift/core-concepts/namespaces-projects/#references","title":"References","text":"Namespace YAML
apiVersion: v1\nkind: Namespace\nmetadata:\n  name: dev\n
Pod YAML specifiying Namespace
apiVersion: v1\nkind: Pod\nmetadata:\n  name: myapp-pod\n  namespace: dev\nspec:\n  containers:\n    - name: myapp-container\n      image: busybox\n      command: [\"sh\", \"-c\", \"echo Hello Kubernetes! && sleep 3600\"]\n
OpenShiftKubernetes Getting all namespaces/projects
oc projects\n
Create a new Project
oc new-project dev\n
Viewing Current Project
oc project\n
Setting Namespace in Context
oc project dev\n
Viewing Project Status
oc status\n
Getting all namespaces
kubectl get namespaces\n
Create a new namespace called bar
kubectl create ns dev\n
Setting Namespace in Context
kubectl config set-context --current --namespace=dev\n
"},{"location":"openshift/deployments/","title":"Deployments","text":"

A Deployment provides declarative updates for Pods and ReplicaSets.

You describe a desired state in a Deployment, and the Deployment Controller changes the actual state to the desired state at a controlled rate. You can define Deployments to create new ReplicaSets, or to remove existing Deployments and adopt all their resources with new Deployments.

The following are typical use cases for Deployments: - Create a Deployment to rollout a ReplicaSet. The ReplicaSet creates Pods in the background. Check the status of the rollout to see if it succeeds or not. - Declare the new state of the Pods by updating the PodTemplateSpec of the Deployment. A new ReplicaSet is created and the Deployment manages moving the Pods from the old ReplicaSet to the new one at a controlled rate. Each new ReplicaSet updates the revision of the Deployment. - Rollback to an earlier Deployment revision if the current state of the Deployment is not stable. Each rollback updates the revision of the Deployment. - Scale up the Deployment to facilitate more load. - Pause the Deployment to apply multiple fixes to its PodTemplateSpec and then resume it to start a new rollout. - Use the status of the Deployment as an indicator that a rollout has stuck. - Clean up older ReplicaSets that you don\u2019t need anymore.

"},{"location":"openshift/deployments/#resources","title":"Resources","text":"OpenShiftKubernetes

Deployments

Managing Deployment Processes

DeploymentConfig Strategies

Route Based Deployment Strategies

Deployments

Scaling Deployments

"},{"location":"openshift/deployments/#references","title":"References","text":"
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment\n  labels:\n    app: nginx\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: nginx\n  template:\n    metadata:\n      labels:\n        app: nginx\n    spec:\n      containers:\n      - name: nginx\n        image: bitnami/nginx:1.16.0\n        ports:\n        - containerPort: 8080\n
OpenshiftKubernetes Create a Deployment
oc apply -f deployment.yaml\n
Get Deployment
oc get deployment my-deployment\n
Get Deployment's Description
oc describe deployment my-deployment\n
Edit Deployment
oc edit deployment my-deployment\n
Scale Deployment
oc scale deployment/my-deployment --replicas=3\n
Delete Deployment
oc delete deployment my-deployment\n
Create a Deployment
kubectl apply -f deployment.yaml\n
Get Deployment
kubectl get deployment my-deployment\n
Get Deployment's Description
kubectl describe deployment my-deployment\n
Edit Deployment
kubectl edit deployment my-deployment\n
Scale Deployment
kubectl scale deployment/my-deployment --replicas=3\n
Delete Deployment
kubectl delete deployment my-deployment\n
"},{"location":"openshift/deployments/#activities","title":"Activities","text":"Task Description Link *** Try It Yourself *** Rolling Updates Lab Create a Rolling Update for your application Rolling Updates"},{"location":"openshift/deployments/updates/","title":"Rolling Updates and Rollbacks","text":"

Updating a Deployment A Deployment\u2019s rollout is triggered if and only if the Deployment\u2019s Pod template (that is, .spec.template) is changed, for example if the labels or container images of the template are updated. Other updates, such as scaling the Deployment, do not trigger a rollout.

Each time a new Deployment is observed by the Deployment controller, a ReplicaSet is created to bring up the desired Pods. If the Deployment is updated, the existing ReplicaSet that controls Pods whose labels match .spec.selector but whose template does not match .spec.template are scaled down. Eventually, the new ReplicaSet is scaled to .spec.replicas and all old ReplicaSets is scaled to 0.

Label selector updates It is generally discouraged to make label selector updates and it is suggested to plan your selectors up front. In any case, if you need to perform a label selector update, exercise great caution and make sure you have grasped all of the implications.

Rolling Back a Deployment Sometimes, you may want to rollback a Deployment; for example, when the Deployment is not stable, such as crash looping. By default, all of the Deployment\u2019s rollout history is kept in the system so that you can rollback anytime you want (you can change that by modifying revision history limit).

A Deployment\u2019s revision is created when a Deployment\u2019s rollout is triggered. This means that the new revision is created if and only if the Deployment\u2019s Pod template (.spec.template) is changed, for example if you update the labels or container images of the template. Other updates, such as scaling the Deployment, do not create a Deployment revision, so that you can facilitate simultaneous manual- or auto-scaling. This means that when you roll back to an earlier revision, only the Deployment\u2019s Pod template part is rolled back.

"},{"location":"openshift/deployments/updates/#resources","title":"Resources","text":"OpenShiftKubernetes

Rollouts

Rolling Back

Updating a Deployment

Rolling Back a Deployment

"},{"location":"openshift/deployments/updates/#references","title":"References","text":"
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment\n  labels:\n    app: nginx\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: nginx\n  template:\n    metadata:\n      labels:\n        app: nginx\n    spec:\n      containers:\n      - name: nginx\n        image: bitnami/nginx:1.16.0\n        ports:\n        - containerPort: 8080\n
OpenShiftKubernetes Get Deployments
oc get deployments\n
Sets new image for Deployment
oc set image deployment/my-deployment nginx=bitnami/nginx:1.16.1 --record\n
Check the status of a rollout
oc rollout status deployment my-deployment\n
Get Replicasets
oc get rs\n
Get Deployment Description
oc describe deployment my-deployment\n
Get Rollout History
oc rollout history deployment my-deployment\n
Undo Rollout
oc rollback my-deployment\n
Delete Deployment
oc delete deployment my-deployment\n
Create a Deployment
kubectl apply -f deployment.yaml\n
Create a new namespace called bar
kubectl create ns dev\n
Setting Namespace in Context
kubectl config set-context --current --namespace=dev\n
"},{"location":"openshift/operators/operators/","title":"Operators","text":"

Operators in Openshift are...

"},{"location":"openshift/pods/","title":"Pods","text":"

A Pod is the basic execution unit of a Kubernetes application\u2013the smallest and simplest unit in the Kubernetes object model that you create or deploy. A Pod represents processes running on your Cluster.

A Pod encapsulates an application\u2019s container (or, in some cases, multiple containers), storage resources, a unique network IP, and options that govern how the container(s) should run. A Pod represents a unit of deployment: a single instance of an application in Kubernetes, which might consist of either a single container or a small number of containers that are tightly coupled and that share resources.

"},{"location":"openshift/pods/#resources","title":"Resources","text":"OpenShiftKubernetes

About Pods

Cluster Configuration for Pods

Pod Autoscaling

Pod Overview

Pod Lifecycle

Pod Usage

"},{"location":"openshift/pods/#references","title":"References","text":"
apiVersion: v1\nkind: Pod\nmetadata:\n  name: myapp-pod\n  labels:\n    app: myapp\nspec:\n  containers:\n    - name: myapp-container\n      image: busybox\n      command: [\"sh\", \"-c\", \"echo Hello Kubernetes! && sleep 3600\"]\n
OpenShiftKubernetes

Create Pod using yaml file

oc apply -f pod.yaml\n

Get Current Pods in Project

oc get pods\n

Get Pods with their IP and node location

oc get pods -o wide\n

Get Pod's Description

oc describe pod myapp-pod\n

Get the logs

oc logs myapp-pod\n

Delete a Pod

oc delete pod myapp-pod\n

Create Pod using yaml file

kubectl apply -f pod.yaml\n

Get Current Pods in Project

kubectl get pods\n

Get Pods with their IP and node location

kubectl get pods -o wide\n

Get Pod's Description

kubectl describe pod myapp-pod\n

Get the logs

kubectl logs myapp-pod\n

Delete a Pod

kubectl delete pod myapp-pod\n
"},{"location":"openshift/pods/#activities","title":"Activities","text":"Task Description Link *** Try It Yourself *** Creating Pods Create a Pod YAML file to meet certain parameters Pod Creation"},{"location":"openshift/pods/health-checks/","title":"Health and Monitoring","text":""},{"location":"openshift/pods/health-checks/#liveness-and-readiness-probes","title":"Liveness and Readiness Probes","text":"

A Probe is a diagnostic performed periodically by the kubelet on a Container. To perform a diagnostic, the kubelet calls a Handler implemented by the Container. There are three types of handlers:

ExecAction: Executes a specified command inside the Container. The diagnostic is considered successful if the command exits with a status code of 0.

TCPSocketAction: Performs a TCP check against the Container\u2019s IP address on a specified port. The diagnostic is considered successful if the port is open.

HTTPGetAction: Performs an HTTP Get request against the Container\u2019s IP address on a specified port and path. The diagnostic is considered successful if the response has a status code greater than or equal to 200 and less than 400.

The kubelet can optionally perform and react to three kinds of probes on running Containers:

livenessProbe: Indicates whether the Container is running. Runs for the lifetime of the Container.

readinessProbe: Indicates whether the Container is ready to service requests. Like the liveness probe, it runs periodically for the lifetime of the Container.

"},{"location":"openshift/pods/health-checks/#resources","title":"Resources","text":"OpenShiftKubernetes

Application Health

Virtual Machine Health

Container Probes

Configure Probes

"},{"location":"openshift/pods/health-checks/#references","title":"References","text":"
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  containers:\n  - name: app\n    image: busybox\n    command: ['sh', '-c', \"echo Hello, Kubernetes! && sleep 3600\"]\n    livenessProbe:\n      exec:\n        command: ['echo','alive']\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  shareProcessNamespace: true\n  containers:\n  - name: app\n    image: bitnami/nginx\n    ports:\n    - containerPort: 8080\n    livenessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 10\n    readinessProbe:\n      httpGet:\n        path: /\n        port: 8080\n      periodSeconds: 10\n
"},{"location":"openshift/pods/health-checks/#container-logging","title":"Container Logging","text":"

Application and systems logs can help you understand what is happening inside your cluster. The logs are particularly useful for debugging problems and monitoring cluster activity.

Kubernetes provides no native storage solution for log data, but you can integrate many existing logging solutions into your Kubernetes cluster.

"},{"location":"openshift/pods/health-checks/#resources_1","title":"Resources","text":"

OpenShift

  • Logs Command
  • Cluster Logging
  • Logging Collector

IKS

  • Logging
"},{"location":"openshift/pods/health-checks/#references_1","title":"References","text":"Pod Example
apiVersion: v1\nkind: Pod\nmetadata:\n  name: counter\nspec:\n  containers:\n  - name: count\n    image: busybox\n    command: ['sh','-c','i=0; while true; do echo \"$i: $(date)\"; i=$((i+1)); sleep 5; done']\n
OpenShiftKubernetes Get Logs
oc logs\n
Use Stern to View Logs
brew install stern\nstern . -n default\n
Get Logs
kubectl logs\n
Use Stern to View Logs
brew install stern\nstern . -n default\n
"},{"location":"openshift/pods/health-checks/#monitoring-applications","title":"Monitoring Applications","text":"

To scale an application and provide a reliable service, you need to understand how the application behaves when it is deployed. You can examine application performance in a Kubernetes cluster by examining the containers, pods, services, and the characteristics of the overall cluster. Kubernetes provides detailed information about an application\u2019s resource usage at each of these levels. This information allows you to evaluate your application\u2019s performance and where bottlenecks can be removed to improve overall performance.

Prometheus, a CNCF project, can natively monitor Kubernetes, nodes, and Prometheus itself.

"},{"location":"openshift/pods/health-checks/#resources_2","title":"Resources","text":"

OpenShift

  • Monitoring Application Health

IKS

  • Monitoring Resource Usage
  • Resource Metrics
"},{"location":"openshift/pods/health-checks/#references_2","title":"References","text":"
apiVersion: v1\nkind: Pod\nmetadata:\n  name: 500m\nspec:\n  containers:\n  - name: app\n    image: gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4\n    resources:\n      requests:\n        cpu: 700m\n        memory: 128Mi\n  - name: busybox-sidecar\n    image: radial/busyboxplus:curl\n    command: [/bin/sh, -c, 'until curl localhost:8080/ConsumeCPU -d \"millicores=500&durationSec=3600\"; do sleep 5; done && sleep 3700']\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: 200m\nspec:\n  containers:\n  - name: app\n    image: gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4\n    resources:\n      requests:\n        cpu: 300m\n        memory: 64Mi\n  - name: busybox-sidecar\n    image: radial/busyboxplus:curl\n    command: [/bin/sh, -c, 'until curl localhost:8080/ConsumeCPU -d \"millicores=200&durationSec=3600\"; do sleep 5; done && sleep 3700']\n
OpenShiftKubernetes
oc get projects\noc api-resources -o wide\noc api-resources -o name\n\noc get nodes,ns,po,deploy,svc\n\noc describe node --all\n

Verify Metrics is enabled

kubectl get --raw /apis/metrics.k8s.io/\n

Get Node Description

kubectl describe node\n

Check Resource Usage

kubectl top pods\nkubectl top nodes\n

"},{"location":"openshift/pods/health-checks/#activities","title":"Activities","text":"Task Description Link Try It Yourself Probes Create some Health & Startup Probes to find what's causing an issue. Probes"},{"location":"openshift/pods/jobs/","title":"Jobs and CronJobs","text":"

Jobs A Job creates one or more Pods and ensures that a specified number of them successfully terminate. As pods successfully complete, the Job tracks the successful completions. When a specified number of successful completions is reached, the task (ie, Job) is complete. Deleting a Job will clean up the Pods it created.

CronJobs One CronJob object is like one line of a crontab (cron table) file. It runs a job periodically on a given schedule, written in Cron format.

All CronJob schedule: times are based on the timezone of the master where the job is initiated.

"},{"location":"openshift/pods/jobs/#resources","title":"Resources","text":"

OpenShift - Jobs - CronJobs

IKS - Jobs to Completion - Cron Jobs - Automated Tasks with Cron

"},{"location":"openshift/pods/jobs/#references","title":"References","text":"

It computes \u03c0 to 2000 places and prints it out

apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: pi\nspec:\n  template:\n    spec:\n      containers:\n      - name: pi\n        image: perl\n        command: [\"perl\",  \"-Mbignum=bpi\", \"-wle\", \"print bpi(2000)\"]\n      restartPolicy: Never\n  backoffLimit: 4\n

Running in parallel

apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: pi\nspec:\n  parallelism: 2\n  completions: 3\n  template:\n    spec:\n      containers:\n        - name: pi\n          image: perl\n          command: [\"perl\", \"-Mbignum=bpi\", \"-wle\", \"print bpi(2000)\"]\n      restartPolicy: Never\n  backoffLimit: 4\n

apiVersion: batch/v1beta1\nkind: CronJob\nmetadata:\n  name: hello\nspec:\n  schedule: \"*/1 * * * *\"\n  jobTemplate:\n    spec:\n      template:\n        spec:\n          containers:\n          - name: hello\n            image: busybox\n            args:\n            - /bin/sh\n            - -c\n            - date; echo Hello from the Kubernetes cluster\n          restartPolicy: OnFailure\n
OpenShiftKubernetes

Gets Jobs

oc get jobs\n
Gets Job Description
oc describe job pi\n
Gets Pods from the Job
oc get pods\n
Deletes Job
oc delete job pi\n
Gets CronJob
oc get cronjobs\n
Describes CronJob
oc describe cronjobs pi\n
Gets Pods from CronJob
oc get pods\n
Deletes CronJob
oc delete cronjobs pi\n

Gets Jobs

kubectl get jobs\n
Gets Job Description
kubectl describe job pi\n
Gets Pods from the Job
kubectl get pods\n
Deletes Job
kubectl delete job pi\n
Gets CronJob
kubectl get cronjobs\n
Describes CronJob
kubectl describe cronjobs pi\n
Gets Pods from CronJob
kubectl get pods\n
Deletes CronJob
kubectl delete cronjobs pi\n

"},{"location":"openshift/pods/jobs/#activities","title":"Activities","text":"Task Description Link Try It Yourself Rolling Updates Lab Create a Rolling Update for your application. Rolling Updates Cron Jobs Lab Using Tekton to test new versions of applications. Cron Jobs"},{"location":"openshift/pods/multi-container/","title":"Multi-Containers Pod","text":"

Container images solve many real-world problems with existing packaging and deployment tools, but in addition to these significant benefits, containers offer us an opportunity to fundamentally re-think the way we build distributed applications. Just as service oriented architectures (SOA) encouraged the decomposition of applications into modular, focused services, containers should encourage the further decomposition of these services into closely cooperating modular containers. By virtue of establishing a boundary, containers enable users to build their services using modular, reusable components, and this in turn leads to services that are more reliable, more scalable and faster to build than applications built from monolithic containers.

"},{"location":"openshift/pods/multi-container/#resources","title":"Resources","text":"Kubernetes

Sidecar Logging

Shared Volume Communication

Toolkit Patterns

Brendan Burns Paper

"},{"location":"openshift/pods/multi-container/#references","title":"References","text":"
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  volumes:\n  - name: shared-data\n    emptyDir: {}\n  containers:\n  - name: app\n    image: bitnami/nginx\n    volumeMounts:\n      - name: shared-data\n        mountPath: /app\n    ports:\n    - containerPort: 8080\n  - name: sidecard\n    image: busybox\n    volumeMounts:\n    - name: shared-data\n      mountPath: /pod-data\n    command: ['sh', '-c', 'echo Hello from the side container > /pod-data/index.html && sleep 3600']\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  shareProcessNamespace: true\n  containers:\n  - name: app\n    image: bitnami/nginx\n    ports:\n    - containerPort: 8080\n  - name: sidecard\n    image: busybox\n    securityContext:\n      capabilities:\n        add:\n        - SYS_PTRACE\n    stdin: true\n    tty: true\n
OpenShiftKubernetes

Attach Pods Together

oc attach -it my-pod -c sidecard\n
ps ax\n
kill -HUP 7\n
ps ax\n

Attach Pods Together

kubectl attach -it my-pod -c sidecard\n
ps ax\n
kill -HUP 7\n
ps ax\n

"},{"location":"openshift/pods/multi-container/#activities","title":"Activities","text":"Task Description Link Try It Yourself Multiple Containers Build a container using legacy container image. Multiple Containers"},{"location":"openshift/pods/tagging/","title":"Labels, Selectors, and Annotations","text":"

Labels are key/value pairs that are attached to objects, such as pods. Labels are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system. Labels can be used to organize and to select subsets of objects. Labels can be attached to objects at creation time and subsequently added and modified at any time. Each object can have a set of key/value labels defined. Each Key must be unique for a given object.

You can use Kubernetes annotations to attach arbitrary non-identifying metadata to objects. Clients such as tools and libraries can retrieve this metadata.

You can use either labels or annotations to attach metadata to Kubernetes objects. Labels can be used to select objects and to find collections of objects that satisfy certain conditions. In contrast, annotations are not used to identify and select objects. The metadata in an annotation can be small or large, structured or unstructured, and can include characters not permitted by labels.

"},{"location":"openshift/pods/tagging/#resources","title":"Resources","text":"OpenShiftKubernetes

CLI Label Commands

Labels

Annotations

"},{"location":"openshift/pods/tagging/#references","title":"References","text":"
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\n  labels:\n    app: foo\n    tier: frontend\n    env: dev\n  annotations:\n    imageregistry: \"https://hub.docker.com/\"\n    gitrepo: \"https://github.com/csantanapr/knative\"\nspec:\n  containers:\n  - name: app\n    image: bitnami/nginx\n
OpenShiftKubernetes

Change Labels on Objects

oc label pod my-pod boot=camp\n
Getting Pods based on their labels.
oc get pods --show-labels\n
oc get pods -L tier,env\n
oc get pods -l app\n
oc get pods -l tier=frontend\n
oc get pods -l 'env=dev,tier=frontend'\n
oc get pods -l 'env in (dev, test)'\n
oc get pods -l 'tier!=backend'\n
oc get pods -l 'env,env notin (prod)'\n
Delete the Pod.
oc delete pod my-pod\n

Change Labels on Objects

kubectl label pod my-pod boot=camp\n
Getting Pods based on their labels.
kubectl get pods --show-labels\n
kubectl get pods -L tier,env\n
kubectl get pods -l app\n
kubectl get pods -l tier=frontend\n
kubectl get pods -l 'env=dev,tier=frontend'\n
kubectl get pods -l 'env in (dev, test)'\n
kubectl get pods -l 'tier!=backend'\n
kubectl get pods -l 'env,env notin (prod)'\n
Delete the Pod.
kubectl delete pod my-pod\n

"},{"location":"openshift/pods/troubleshooting/","title":"Debugging Applications","text":"

Kubernetes provides tools to help troubleshoot and debug problems with applications.

Usually this involves getting familiar with how primitive objects interact with each other, checking the status of objects, and finally checking logs for any last-resort clues.

"},{"location":"openshift/pods/troubleshooting/#resources","title":"Resources","text":"OpenShiftKubernetes

Debugging with ODO

Debugging Applications

Debugging Services

Debugging Replication Controllers

"},{"location":"openshift/pods/troubleshooting/#references","title":"References","text":"OpenShiftKubernetes

** MacOS/Linux/Windows command: **

oc apply -f https://gist.githubusercontent.com/csantanapr/e823b1bfab24186a26ae4f9ec1ff6091/raw/1e2a0cca964c7b54ce3df2fc3fbf33a232511877/debugk8s-bad.yaml\n

** Expose the service using port-forward **

oc port-forward service/my-service 8080:80 -n debug\n
** Try to access the service **
curl http://localhost:8080\n

** Try Out these Commands to Debug **

oc get pods --all-namespaces\n
oc project debug\n
oc get deployments\n
oc describe pod\n
oc explain Pod.spec.containers.resources.requests\n
oc explain Pod.spec.containers.livenessProbe\n
oc edit deployment\n
oc logs\n
oc get service\n
oc get ep\n
oc describe service\n
oc get pods --show-labels\n
oc get deployment --show-labels\n

** MacOS/Linux/Windows command: **

kubectl apply -f https://gist.githubusercontent.com/csantanapr/e823b1bfab24186a26ae4f9ec1ff6091/raw/1e2a0cca964c7b54ce3df2fc3fbf33a232511877/debugk8s-bad.yaml\n

** Expose the service using port-forward **

kubectl port-forward service/my-service 8080:80 -n debug\n
** Try to access the service **
curl http://localhost:8080\n

** Try Out these Commands to Debug **

kubectl get pods --all-namespaces\n
kubectl config set-context --current --namespace=debug\n
kubectl get deployments\n
kubectl describe pod\n
kubectl explain Pod.spec.containers.resources.requests\n
kubectl explain Pod.spec.containers.livenessProbe\n
kubectl edit deployment\n
kubectl logs\n
kubectl get service\n
kubectl get ep\n
kubectl describe service\n
kubectl get pods --show-labels\n
kubectl get deployment --show-labels\n

"},{"location":"openshift/pods/troubleshooting/#activities","title":"Activities","text":"

The continuous integration activities focus around Tekton the integration platform. These labs will show you how to build pipelines and test your code before deployment.

Task Description Link *** Try It Yourself *** Debugging Find which service is breaking in your cluster and find out why. Debugging"},{"location":"openshift/services-networking/","title":"Services","text":"

An abstract way to expose an application running on a set of Pods as a network service.

Kubernetes Pods are mortal. They are born and when they die, they are not resurrected. If you use a Deployment to run your app, it can create and destroy Pods dynamically.

Each Pod gets its own IP address, however in a Deployment, the set of Pods running in one moment in time could be different from the set of Pods running that application a moment later.

In Kubernetes, a Service is an abstraction which defines a logical set of Pods and a policy by which to access them (sometimes this pattern is called a micro-service). The set of Pods targeted by a Service is usually determined by a selector (see below for why you might want a Service without a selector).

If you\u2019re able to use Kubernetes APIs for service discovery in your application, you can query the API server for Endpoints, that get updated whenever the set of Pods in a Service changes.

For non-native applications, Kubernetes offers ways to place a network port or load balancer in between your application and the backend Pods.

"},{"location":"openshift/services-networking/#resources","title":"Resources","text":"OpenShift & Kubernetes

Services

Exposing Services

"},{"location":"openshift/services-networking/#references","title":"References","text":"
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment\n  labels:\n    app: nginx\n    version: v1\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      app: nginx\n  template:\n    metadata:\n      labels:\n        app: nginx\n        version: v1\n    spec:\n      containers:\n      - name: nginx\n        image: bitnami/nginx\n        ports:\n        - containerPort: 8080\n          name: http\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: my-service\nspec:\n  selector:\n    app: nginx\n  ports:\n    - name: http\n      port: 80\n      targetPort: http\n
OpenShiftKubernetes Get Logs
oc logs\n
Use Stern to View Logs
brew install stern\nstern . -n default\n
Get Logs
kubectl logs\n
Use Stern to View Logs
brew install stern\nstern . -n default\n
\n
OpenShiftKubernetes Get Service
oc get svc\n
Get Service Description
oc describe svc my-service\n
Expose a Service
oc expose service <service_name>\n
Get Route for the Service
oc get route\n
Get Service
kubectl get svc\n
Get Service Description
kubectl describe svc my-service\n
Get Service Endpoints
kubectl get ep my-service\n
Expose a Deployment via a Service
kubectl expose deployment my-deployment --port 80 --target-port=http --selector app=nginx --name my-service-2 --type NodePort\n
"},{"location":"openshift/services-networking/#activities","title":"Activities","text":"Task Description Link *** Try It Yourself *** Creating Services Create two services with certain requirements. Setting up Services IKS Ingress Controller Configure Ingress on Free IKS Cluster Setting IKS Ingress"},{"location":"openshift/services-networking/ingress/","title":"Ingress","text":"

An API object that manages external access to the services in a cluster, typically HTTP.

Ingress can provide load balancing, SSL termination and name-based virtual hosting.

Ingress exposes HTTP and HTTPS routes from outside the cluster to services within the cluster. Traffic routing is controlled by rules defined on the Ingress resource.

"},{"location":"openshift/services-networking/ingress/#resources","title":"Resources","text":"OpenShiftKubernetes

Ingress Operator

Using Ingress Controllers

Ingress

Ingress Controllers

Minikube Ingress

"},{"location":"openshift/services-networking/ingress/#references","title":"References","text":"
apiVersion: networking.k8s.io/v1beta1 # for versions before 1.14 use extensions/v1beta1\nkind: Ingress\nmetadata:\n  name: example-ingress\nspec:\n  rules:\n  - host: hello-world.info\n    http:\n      paths:\n      - path: /\n        backend:\n          serviceName: web\n          servicePort: 8080\n
OpenShiftKubernetes View Ingress Status
oc describe clusteroperators/ingress\n
Describe default Ingress Controller
oc describe --namespace=openshift-ingress-operator ingresscontroller/default\n

Describe default Ingress Controller

kubectl get pods -n kube-system | grep ingress\n
kubectl create deployment web --image=bitnami/nginx\n
kubectl expose deployment web --name=web --port 8080\n
kubectl get svc web\n
kubectl get ingress\n
kubectl describe ingress example-ingress\n
curl hello-world.info --resolve hello-world.info:80:<ADDRESS>\n

"},{"location":"openshift/services-networking/ingress/#activities","title":"Activities","text":"Task Description Link *** Try It Yourself *** IKS Ingress Controller Configure Ingress on Free IKS Cluster Setting IKS Ingress"},{"location":"openshift/services-networking/routes/","title":"Routes","text":"

(OpenShift Only)

Routes are Openshift objects that expose services for external clients to reach them by name.

Routes can be unsecured or secured on creation using certificates.

The new route inherits the name from the service unless you specify one using the --name option.

"},{"location":"openshift/services-networking/routes/#resources","title":"Resources","text":"OpenShift

Routes

Route Configuration

Secured Routes

"},{"location":"openshift/services-networking/routes/#references","title":"References","text":"

** Route Creation **

apiVersion: v1\nkind: Route\nmetadata:\n  name: frontend\nspec:\n  to:\n    kind: Service\n    name: frontend\n
** Secured Route Creation **
apiVersion: v1\nkind: Route\nmetadata:\n  name: frontend\nspec:\n  to:\n    kind: Service\n    name: frontend\n  tls:\n    termination: edge\n

"},{"location":"openshift/services-networking/routes/#commands","title":"Commands","text":"OpenShift Create Route from YAML
oc apply -f route.yaml\n
Get Route
oc get route\n
Describe Route
oc get route <route_name>\n
Get Route YAML
oc get route <route_name> -o yaml\n
"},{"location":"openshift/services-networking/services/","title":"Services","text":"

An abstract way to expose an application running on a set of Pods as a network service.

Kubernetes Pods are mortal. They are born and when they die, they are not resurrected. If you use a Deployment to run your app, it can create and destroy Pods dynamically.

Each Pod gets its own IP address, however in a Deployment, the set of Pods running in one moment in time could be different from the set of Pods running that application a moment later.

In Kubernetes, a Service is an abstraction which defines a logical set of Pods and a policy by which to access them (sometimes this pattern is called a micro-service). The set of Pods targeted by a Service is usually determined by a selector (see below for why you might want a Service without a selector).

If you\u2019re able to use Kubernetes APIs for service discovery in your application, you can query the API server for Endpoints, that get updated whenever the set of Pods in a Service changes.

For non-native applications, Kubernetes offers ways to place a network port or load balancer in between your application and the backend Pods.

"},{"location":"openshift/services-networking/services/#resources","title":"Resources","text":"OpenShift & Kubernetes

Services

Exposing Services

"},{"location":"openshift/services-networking/services/#references","title":"References","text":"
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment\n  labels:\n    app: nginx\n    version: v1\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      app: nginx\n  template:\n    metadata:\n      labels:\n        app: nginx\n        version: v1\n    spec:\n      containers:\n      - name: nginx\n        image: bitnami/nginx\n        ports:\n        - containerPort: 8080\n          name: http\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: my-service\nspec:\n  selector:\n    app: nginx\n  ports:\n    - name: http\n      port: 80\n      targetPort: http\n
OpenShiftKubernetes Get Logs
oc logs\n
Use Stern to View Logs
brew install stern\nstern . -n default\n
Get Logs
kubectl logs\n
Use Stern to View Logs
brew install stern\nstern . -n default\n
\n
OpenShiftKubernetes Get Service
oc get svc\n
Get Service Description
oc describe svc my-service\n
Expose a Service
oc expose service <service_name>\n
Get Route for the Service
oc get route\n
Get Service
kubectl get svc\n
Get Service Description
kubectl describe svc my-service\n
Get Service Endpoints
kubectl get ep my-service\n
Expose a Deployment via a Service
kubectl expose deployment my-deployment --port 80 --target-port=http --selector app=nginx --name my-service-2 --type NodePort\n
"},{"location":"openshift/services-networking/services/#activities","title":"Activities","text":"Task Description Link *** Try It Yourself *** Creating Services Create two services with certain requirements. Setting up Services"},{"location":"openshift/state-persistence/","title":"State Persistence","text":"

State persistence in the context of Kubernetes/OpenShift refers to the ability to maintain and retain the state or data of applications even when they are stopped, restarted, or moved between nodes. This is achieved through the use of volumes, persistent volumes (PVs), and persistent volume claims (PVCs). Volumes provide a way to store and access data in a container, while PVs serve as the underlying storage resources provisioned by the cluster. PVCs act as requests made by applications for specific storage resources from the available PVs. By utilizing PVs and PVCs, applications can ensure that their state is preserved and accessible across pod restarts and migrations, enabling reliable and consistent data storage and retrieval throughout the cluster.

"},{"location":"openshift/state-persistence/#resources","title":"Resources","text":"

Volumes

Persistent Volumes

Persistent Volume Claims

"},{"location":"openshift/state-persistence/#references","title":"References","text":"

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: my-pvc\nspec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 1Gi\n
In this example, we define a PVC named my-pvc with the following specifications:

accessModes specify that the volume can be mounted as read-write by a single node at a time (ReadWriteOnce). resources.requests.storage specifies the requested storage size for the PVC (1Gi).

"},{"location":"openshift/state-persistence/#activities","title":"Activities","text":"Task Description Link *** Try It Yourself *** Setting up Persistent Volumes Create a Persistent Volume that's accessible from a SQL Pod. Setting up Persistent Volumes"},{"location":"openshift/state-persistence/pv-pvc/","title":"PersistentVolumes and Claims","text":"

Managing storage is a distinct problem from managing compute instances. The PersistentVolume subsystem provides an API for users and administrators that abstracts details of how storage is provided from how it is consumed.

A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes.

A PersistentVolumeClaim (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources. Claims can request specific size and access modes (e.g., they can be mounted once read/write or many times read-only).

While PersistentVolumeClaims allow a user to consume abstract storage resources, it is common that users need PersistentVolumes with varying properties, such as performance, for different problems. Cluster administrators need to be able to offer a variety of PersistentVolumes that differ in more ways than just size and access modes, without exposing users to the details of how those volumes are implemented. For these needs, there is the StorageClass resource.

Pods access storage by using the claim as a volume. Claims must exist in the same namespace as the Pod using the claim. The cluster finds the claim in the Pod\u2019s namespace and uses it to get the PersistentVolume backing the claim. The volume is then mounted to the host and into the Pod.

PersistentVolume binds are exclusive, and since PersistentVolumeClaims are namespaced objects, mounting claims with \u201cMany\u201d modes (ROX, RWX) is only possible within one namespace.

"},{"location":"openshift/state-persistence/pv-pvc/#resources","title":"Resources","text":"OpenShiftKubernetes

Persistent Storage

Persistent Volume Types

Expanding Persistent Volumes

Persistent Volumes

Writing Portable Configurations

Configuring Persistent Volume Storage

"},{"location":"openshift/state-persistence/pv-pvc/#references","title":"References","text":"
kind: PersistentVolume\napiVersion: v1\nmetadata:\n  name: my-pv\nspec:\n  storageClassName: local-storage\n  capacity:\n    storage: 128Mi\n  accessModes:\n    - ReadWriteOnce\n  hostPath:\n    path: \"/mnt/data-1\"\n
apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: my-pvc\nspec:\n  storageClassName: local-storage\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 100Mi\n
kind: Pod\napiVersion: v1\nmetadata:\n  name: my-pod\nspec:\n  containers:\n  - name: nginx\n    image: busybox\n    command: ['sh', '-c', 'echo $(date):$HOSTNAME Hello Kubernetes! >> /mnt/data/message.txt && sleep 3600']\n    volumeMounts:\n    - mountPath: \"/mnt/data\"\n      name: my-data\n  volumes:\n  - name: my-data\n    persistentVolumeClaim:\n      claimName: my-pvc\n
OpenShiftKubernetes Get the Persistent Volumes in Project
oc get pv\n
Get the Persistent Volume Claims
oc get pvc\n
Get a specific Persistent Volume
oc get pv <pv_claim>\n
Get the Persistent Volume
kubectl get pv\n
Get the Persistent Volume Claims
kubectl get pvc\n
"},{"location":"openshift/state-persistence/volumes/","title":"Volumes","text":"

On-disk files in a Container are ephemeral, which presents some problems for non-trivial applications when running in Containers. First, when a Container crashes, kubelet will restart it, but the files will be lost - the Container starts with a clean state. Second, when running Containers together in a Pod it is often necessary to share files between those Containers. The Kubernetes Volume abstraction solves both of these problems.

Docker also has a concept of volumes, though it is somewhat looser and less managed. In Docker, a volume is simply a directory on disk or in another Container.

A Kubernetes volume, on the other hand, has an explicit lifetime - the same as the Pod that encloses it. Consequently, a volume outlives any Containers that run within the Pod, and data is preserved across Container restarts. Of course, when a Pod ceases to exist, the volume will cease to exist, too. Perhaps more importantly than this, Kubernetes supports many types of volumes, and a Pod can use any number of them simultaneously.

"},{"location":"openshift/state-persistence/volumes/#resources","title":"Resources","text":"OpenShiftKubernetes

Volume Lifecycle

Volumes

"},{"location":"openshift/state-persistence/volumes/#references","title":"References","text":"
apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-pod\nspec:\n  containers:\n  - image: busybox\n    command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600']\n    name: busybox\n    volumeMounts:\n    - mountPath: /cache\n      name: cache-volume\n  volumes:\n  - name: cache-volume\n    emptyDir: {}\n
apiVersion: v1\nkind: Pod\nmetadata:\n  name: test-pd\nspec:\n  containers:\n  - image: bitnami/nginx\n    name: test-container\n    volumeMounts:\n    - mountPath: /test-pd\n      name: test-volume\n  volumes:\n  - name: test-volume\n    hostPath:\n      # directory location on host\n      path: /data\n      # this field is optional\n      type: Directory\n
"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 0000000..0f8724e --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 0000000..3320740 Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/stylesheets/fonts/IBMPlexMono-Light.ttf b/stylesheets/fonts/IBMPlexMono-Light.ttf new file mode 100644 index 0000000..88cbd9b Binary files /dev/null and b/stylesheets/fonts/IBMPlexMono-Light.ttf differ diff --git a/stylesheets/fonts/IBMPlexMono-Medium.ttf b/stylesheets/fonts/IBMPlexMono-Medium.ttf new file mode 100644 index 0000000..3dea23c Binary files /dev/null and b/stylesheets/fonts/IBMPlexMono-Medium.ttf differ diff --git a/stylesheets/fonts/IBMPlexMono-Regular.ttf b/stylesheets/fonts/IBMPlexMono-Regular.ttf new file mode 100644 index 0000000..93331e2 Binary files /dev/null and b/stylesheets/fonts/IBMPlexMono-Regular.ttf differ diff --git a/stylesheets/index.css b/stylesheets/index.css new file mode 100644 index 0000000..da8ee09 --- /dev/null +++ b/stylesheets/index.css @@ -0,0 +1,46 @@ +:root > * { + --md-primary-fg-color: #161616; + --md-footer-bg-color: #393939; +} + +@font-face { + font-family: "IBM Plex Mono Medium"; + src: url("fonts/IBMPlexMono-Medium.ttf") format("truetype"); +} + +@font-face { + font-family: "IBM Plex Mono Regular"; + src: url("fonts/IBMPlexMono-Regular.ttf") format("truetype"); +} + +@font-face { + font-family: "IBM Plex Mono Light"; + src: url("fonts/IBMPlexMono-Light.ttf") format("truetype"); +} + +.header{ + font-size: 15px; + pointer-events: none; +} + +.sub-section{ + font-size: 15px; + text-indent: 10px; +} + +h1 { + font-family: "IBM Plex Mono Medium"; + font-size: 15px; +} + +h2 { + font-family: "IBM Plex Mono Regular"; + font-size: 15px; + font-weight: bold; +} + +h3 { + font-family: "IBM Plex Mono Thin"; + font-size: 15px; + text-indent: 10px; +} \ No newline at end of file diff 
--git a/stylesheets/sample.css b/stylesheets/sample.css new file mode 100644 index 0000000..e69de29