diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 000000000..e69de29bb
diff --git a/404.html b/404.html
new file mode 100644
index 000000000..3f0df0f71
--- /dev/null
+++ b/404.html
@@ -0,0 +1,1978 @@
[... 1,978 added lines of generated site markup for the 404 page (title "Qodo Merge (formerly known as PR-Agent)", body "404 - Not found", plus theme navigation, search, and footer markup) omitted ...]
\ No newline at end of file
diff --git a/CNAME b/CNAME
new file mode 100644
index 000000000..d948bddac
--- /dev/null
+++ b/CNAME
@@ -0,0 +1 @@
+qodo-merge-docs.qodo.ai
diff --git a/assets/favicon.ico b/assets/favicon.ico
new file mode 100644
index 000000000..594427242
Binary files /dev/null and b/assets/favicon.ico differ
diff --git a/assets/images/favicon.png b/assets/images/favicon.png
new file mode 100644
index 000000000..1cf13b9f9
Binary files /dev/null and b/assets/images/favicon.png differ
diff --git a/assets/images/social/chrome-extension/data_privacy.png b/assets/images/social/chrome-extension/data_privacy.png
new file mode 100644
index 000000000..9398a7d61
Binary files /dev/null and b/assets/images/social/chrome-extension/data_privacy.png differ
diff --git a/assets/images/social/chrome-extension/features.png b/assets/images/social/chrome-extension/features.png
new file mode 100644
index 000000000..ac7987cbf
Binary files /dev/null and b/assets/images/social/chrome-extension/features.png differ
diff --git a/assets/images/social/chrome-extension/index.png b/assets/images/social/chrome-extension/index.png
new file mode 100644
index 000000000..998f628f3
Binary files /dev/null and b/assets/images/social/chrome-extension/index.png differ
diff --git a/assets/images/social/core-abilities/code_oriented_yaml.png b/assets/images/social/core-abilities/code_oriented_yaml.png
new file mode 100644
index 000000000..c6725a0c7
Binary files /dev/null and b/assets/images/social/core-abilities/code_oriented_yaml.png differ
diff --git a/assets/images/social/core-abilities/compression_strategy.png b/assets/images/social/core-abilities/compression_strategy.png
new file mode 100644
index 000000000..c546589c8
Binary files /dev/null and b/assets/images/social/core-abilities/compression_strategy.png differ
diff --git a/assets/images/social/core-abilities/dynamic_context.png b/assets/images/social/core-abilities/dynamic_context.png
new file mode 100644
index 000000000..c44d04eba
Binary files /dev/null and b/assets/images/social/core-abilities/dynamic_context.png differ
diff --git a/assets/images/social/core-abilities/impact_evaluation.png b/assets/images/social/core-abilities/impact_evaluation.png
new file mode 100644
index 000000000..8e3d5e897
Binary files /dev/null and b/assets/images/social/core-abilities/impact_evaluation.png differ
diff --git a/assets/images/social/core-abilities/index.png b/assets/images/social/core-abilities/index.png
new file mode 100644
index 000000000..16370c67e
Binary files /dev/null and b/assets/images/social/core-abilities/index.png differ
diff --git a/assets/images/social/core-abilities/interactivity.png b/assets/images/social/core-abilities/interactivity.png
new file mode 100644
index 000000000..3ee59bc4d
Binary files /dev/null and b/assets/images/social/core-abilities/interactivity.png differ
diff --git a/assets/images/social/core-abilities/metadata.png b/assets/images/social/core-abilities/metadata.png
new file mode 100644
index 000000000..8b923f022
Binary files /dev/null and b/assets/images/social/core-abilities/metadata.png differ
diff --git a/assets/images/social/core-abilities/self_reflection.png b/assets/images/social/core-abilities/self_reflection.png
new file mode 100644
index 000000000..db3bfa8a8
Binary files /dev/null and b/assets/images/social/core-abilities/self_reflection.png differ
diff --git a/assets/images/social/core-abilities/static_code_analysis.png b/assets/images/social/core-abilities/static_code_analysis.png
new file mode 100644
index 000000000..f757e1239
Binary files /dev/null and b/assets/images/social/core-abilities/static_code_analysis.png differ
diff --git a/assets/images/social/faq/index.png b/assets/images/social/faq/index.png
new file mode 100644
index 000000000..55123fd83
Binary files /dev/null and b/assets/images/social/faq/index.png differ
diff --git a/assets/images/social/finetuning_benchmark/index.png b/assets/images/social/finetuning_benchmark/index.png
new file mode 100644
index 000000000..c4178fce6
Binary files /dev/null and b/assets/images/social/finetuning_benchmark/index.png differ
diff --git a/assets/images/social/index.png b/assets/images/social/index.png
new file mode 100644
index 000000000..51316f8f8
Binary files /dev/null and b/assets/images/social/index.png differ
diff --git a/assets/images/social/installation/azure.png b/assets/images/social/installation/azure.png
new file mode 100644
index 000000000..750632eb3
Binary files /dev/null and b/assets/images/social/installation/azure.png differ
diff --git a/assets/images/social/installation/bitbucket.png b/assets/images/social/installation/bitbucket.png
new file mode 100644
index 000000000..ccdfc4099
Binary files /dev/null and b/assets/images/social/installation/bitbucket.png differ
diff --git a/assets/images/social/installation/github.png b/assets/images/social/installation/github.png
new file mode 100644
index 000000000..24f4a7dec
Binary files /dev/null and b/assets/images/social/installation/github.png differ
diff --git a/assets/images/social/installation/gitlab.png b/assets/images/social/installation/gitlab.png
new file mode 100644
index 000000000..078a2d7dc
Binary files /dev/null and b/assets/images/social/installation/gitlab.png differ
diff --git a/assets/images/social/installation/index.png b/assets/images/social/installation/index.png
new file mode 100644
index 000000000..ef12773df
Binary files /dev/null and b/assets/images/social/installation/index.png differ
diff --git a/assets/images/social/installation/locally.png b/assets/images/social/installation/locally.png
new file mode 100644
index 000000000..3d2e70301
Binary files /dev/null and b/assets/images/social/installation/locally.png differ
diff --git a/assets/images/social/installation/pr_agent_pro.png b/assets/images/social/installation/pr_agent_pro.png
new file mode 100644
index 000000000..02a85c561
Binary files /dev/null and b/assets/images/social/installation/pr_agent_pro.png differ
diff --git a/assets/images/social/overview/data_privacy.png b/assets/images/social/overview/data_privacy.png
new file mode 100644
index 000000000..9398a7d61
Binary files /dev/null and b/assets/images/social/overview/data_privacy.png differ
diff --git a/assets/images/social/overview/index.png b/assets/images/social/overview/index.png
new file mode 100644
index 000000000..51316f8f8
Binary files /dev/null and b/assets/images/social/overview/index.png differ
diff --git a/assets/images/social/overview/pr_agent_pro.png b/assets/images/social/overview/pr_agent_pro.png
new file mode 100644
index 000000000..02a85c561
Binary files /dev/null and b/assets/images/social/overview/pr_agent_pro.png differ
diff --git a/assets/images/social/tools/analyze.png b/assets/images/social/tools/analyze.png
new file mode 100644
index 000000000..de200dbae
Binary files /dev/null and b/assets/images/social/tools/analyze.png differ
diff --git a/assets/images/social/tools/ask.png b/assets/images/social/tools/ask.png
new file mode 100644
index 000000000..316446fc6
Binary files /dev/null and b/assets/images/social/tools/ask.png differ
diff --git a/assets/images/social/tools/ci_feedback.png b/assets/images/social/tools/ci_feedback.png
new file mode 100644
index 000000000..324686634
Binary files /dev/null and b/assets/images/social/tools/ci_feedback.png differ
diff --git a/assets/images/social/tools/custom_labels.png b/assets/images/social/tools/custom_labels.png
new file mode 100644
index 000000000..a8ffa8973
Binary files /dev/null and b/assets/images/social/tools/custom_labels.png differ
diff --git a/assets/images/social/tools/custom_prompt.png b/assets/images/social/tools/custom_prompt.png
new file mode 100644
index 000000000..b4d272f87
Binary files /dev/null and b/assets/images/social/tools/custom_prompt.png differ
diff --git a/assets/images/social/tools/describe.png b/assets/images/social/tools/describe.png
new file mode 100644
index 000000000..2402adcf1
Binary files /dev/null and b/assets/images/social/tools/describe.png differ
diff --git a/assets/images/social/tools/documentation.png b/assets/images/social/tools/documentation.png
new file mode 100644
index 000000000..498d9e760
Binary files /dev/null and b/assets/images/social/tools/documentation.png differ
diff --git a/assets/images/social/tools/help.png b/assets/images/social/tools/help.png
new file mode 100644
index 000000000..67385c460
Binary files /dev/null and b/assets/images/social/tools/help.png differ
diff --git a/assets/images/social/tools/improve.png b/assets/images/social/tools/improve.png
new file mode 100644
index 000000000..94558f749
Binary files /dev/null and b/assets/images/social/tools/improve.png differ
diff --git a/assets/images/social/tools/improve_component.png b/assets/images/social/tools/improve_component.png
new file mode 100644
index 000000000..0a8e457a9
Binary files /dev/null and b/assets/images/social/tools/improve_component.png differ
diff --git a/assets/images/social/tools/index.png b/assets/images/social/tools/index.png
new file mode 100644
index 000000000..614cc31ad
Binary files /dev/null and b/assets/images/social/tools/index.png differ
diff --git a/assets/images/social/tools/review.png b/assets/images/social/tools/review.png
new file mode 100644
index 000000000..2a102eb0a
Binary files /dev/null and b/assets/images/social/tools/review.png differ
diff --git a/assets/images/social/tools/similar_code.png b/assets/images/social/tools/similar_code.png
new file mode 100644
index 000000000..3242b1b81
Binary files /dev/null and b/assets/images/social/tools/similar_code.png differ
diff --git a/assets/images/social/tools/similar_issues.png b/assets/images/social/tools/similar_issues.png
new file mode 100644
index 000000000..591eba166
Binary files /dev/null and b/assets/images/social/tools/similar_issues.png differ
diff --git a/assets/images/social/tools/test.png b/assets/images/social/tools/test.png
new file mode 100644
index 000000000..69691c749
Binary files /dev/null and b/assets/images/social/tools/test.png differ
diff --git a/assets/images/social/tools/update_changelog.png b/assets/images/social/tools/update_changelog.png
new file mode 100644
index 000000000..64a3717cc
Binary files /dev/null and b/assets/images/social/tools/update_changelog.png differ
diff --git a/assets/images/social/usage-guide/EXAMPLE_BEST_PRACTICE.png b/assets/images/social/usage-guide/EXAMPLE_BEST_PRACTICE.png
new file mode 100644
index 000000000..bd2fecde6
Binary files /dev/null and b/assets/images/social/usage-guide/EXAMPLE_BEST_PRACTICE.png differ
diff --git a/assets/images/social/usage-guide/PR_agent_pro_models.png b/assets/images/social/usage-guide/PR_agent_pro_models.png
new file mode 100644
index 000000000..870b443d8
Binary files /dev/null and b/assets/images/social/usage-guide/PR_agent_pro_models.png differ
diff --git a/assets/images/social/usage-guide/additional_configurations.png b/assets/images/social/usage-guide/additional_configurations.png
new file mode 100644
index 000000000..e5f9525a3
Binary files /dev/null and b/assets/images/social/usage-guide/additional_configurations.png differ
diff --git a/assets/images/social/usage-guide/automations_and_usage.png b/assets/images/social/usage-guide/automations_and_usage.png
new file mode 100644
index 000000000..85e5656cc
Binary files /dev/null and b/assets/images/social/usage-guide/automations_and_usage.png differ
diff --git a/assets/images/social/usage-guide/changing_a_model.png b/assets/images/social/usage-guide/changing_a_model.png
new file mode 100644
index 000000000..f16b03b47
Binary files /dev/null and b/assets/images/social/usage-guide/changing_a_model.png differ
diff --git a/assets/images/social/usage-guide/configuration_options.png b/assets/images/social/usage-guide/configuration_options.png
new file mode 100644
index 000000000..ab2daad45
Binary files /dev/null and b/assets/images/social/usage-guide/configuration_options.png differ
diff --git a/assets/images/social/usage-guide/index.png b/assets/images/social/usage-guide/index.png
new file mode 100644
index 000000000..b636b68c6
Binary files /dev/null and b/assets/images/social/usage-guide/index.png differ
diff --git a/assets/images/social/usage-guide/introduction.png b/assets/images/social/usage-guide/introduction.png
new file mode 100644
index 000000000..5e3f2bfee
Binary files /dev/null and b/assets/images/social/usage-guide/introduction.png differ
diff --git a/assets/images/social/usage-guide/mail_notifications.png b/assets/images/social/usage-guide/mail_notifications.png
new file mode 100644
index 000000000..977fd393c
Binary files /dev/null and b/assets/images/social/usage-guide/mail_notifications.png differ
diff --git a/assets/javascripts/bundle.83f73b43.min.js b/assets/javascripts/bundle.83f73b43.min.js
new file mode 100644
index 000000000..43d8b70f6
--- /dev/null
+++ b/assets/javascripts/bundle.83f73b43.min.js
@@ -0,0 +1,16 @@
[... 16 added lines of minified site JavaScript (theme bundle containing a focus-visible polyfill plus license headers for escape-html and clipboard.js v2.0.11) omitted ...]
g,n=o.pipe(Z(),ie(!0));o.pipe(ee("active"),He(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue(P("[title]",e)).pipe(b(()=>B("content.tooltips")),ne(a=>Qn(a)));return r.subscribe(o),t.pipe(W(n),m(a=>$({ref:e},a)),Re(i.pipe(W(n))))})}function Za(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),ee("active"))}function Bn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?S:Za(o,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))})}function Gn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),ee("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function es(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(ne(o=>h(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Jn(e){let t=P("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=Pt("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;sa.key==="Enter"),re(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(ve(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),es(t).pipe(W(n.pipe(Ce(1))),ct(),w(a=>i.next(a)),_(()=>i.complete()),m(a=>$({ref:e},a)))})}function Xn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(w(o=>r.next({value:o})),_(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Jr=Mt(Br());function ts(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Zn({alert$:e}){Jr.default.isSupported()&&new j(t=>{new Jr.default("[data-clipboard-target], 
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||ts(R(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(w(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function ei(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function rs(e,t){let r=new Map;for(let o of P("url",e)){let n=R("loc",o),i=[ei(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of P("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(ei(new URL(s),t))}}return r}function ur(e){return un(new URL("sitemap.xml",e)).pipe(m(t=>rs(t,new URL(e))),de(()=>I(new Map)))}function os(e,t){if(!(e.target instanceof Element))return S;let r=e.target.closest("a");if(r===null)return S;if(r.target||e.metaKey||e.ctrlKey)return S;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):S}function ti(e){let t=new Map;for(let r of P(":scope > *",e.head))t.set(r.outerHTML,r);return t}function ri(e){for(let t of P("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function ns(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=ti(document);for(let[o,n]of ti(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return We(P("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new j(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),S}),Z(),ie(document))}function oi({location$:e,viewport$:t,progress$:r}){let o=xe();if(location.protocol==="file:")return S;let n=ur(o.base);I(document).subscribe(ri);let i=h(document.body,"click").pipe(He(n),v(([p,c])=>os(p,c)),pe()),a=h(window,"popstate").pipe(m(ye),pe());i.pipe(re(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),O(i,a).subscribe(e);let s=e.pipe(ee("pathname"),v(p=>fn(p,{progress$:r}).pipe(de(()=>(lt(p,!0),S)))),v(ri),v(ns),pe());return O(s.pipe(re(e,(p,c)=>c)),s.pipe(v(()=>e),ee("pathname"),v(()=>e),ee("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),w(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",pn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),h(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(ee("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ni=Mt(qr());function ii(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ni.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function jt(e){return e.type===1}function dr(e){return 
e.type===3}function ai(e,t){let r=yn(e);return O(I(location.protocol!=="file:"),ze("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function si(e){var l;let{selectedVersionSitemap:t,selectedVersionBaseURL:r,currentLocation:o,currentBaseURL:n}=e,i=(l=Xr(n))==null?void 0:l.pathname;if(i===void 0)return;let a=ss(o.pathname,i);if(a===void 0)return;let s=ps(t.keys());if(!t.has(s))return;let p=Xr(a,s);if(!p||!t.has(p.href))return;let c=Xr(a,r);if(c)return c.hash=o.hash,c.search=o.search,c}function Xr(e,t){try{return new URL(e,t)}catch(r){return}}function ss(e,t){if(e.startsWith(t))return e.slice(t.length)}function cs(e,t){let r=Math.min(e.length,t.length),o;for(o=0;oS)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>h(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),re(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?S:(i.preventDefault(),I(new URL(p)))}}return S}),v(i=>ur(i).pipe(m(a=>{var s;return(s=si({selectedVersionSitemap:a,selectedVersionBaseURL:i,currentLocation:ye(),currentBaseURL:t.base}))!=null?s:i})))))).subscribe(n=>lt(n,!0)),z([r,o]).subscribe(([n,i])=>{R(".md-header__topic").appendChild(Cn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ls(e,{worker$:t}){let{searchParams:r}=ye();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),ze("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=ye();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=O(t.pipe(Ae(jt)),h(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function pi(e,{worker$:t}){let r=new g,o=r.pipe(Z(),ie(!0));z([t.pipe(Ae(jt)),r],(i,a)=>a).pipe(ee("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(ee("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),h(e.form,"reset").pipe(W(o)).subscribe(()=>e.focus());let n=R("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),ls(e,{worker$:t}).pipe(w(i=>r.next(i)),_(()=>r.complete()),m(i=>$({ref:e},i)),G(1))}function li(e,{worker$:t,query$:r}){let o=new g,n=on(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=R(":scope > :first-child",e),s=R(":scope > :last-child",e);ze("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(re(r),Wr(t.pipe(Ae(jt)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(w(()=>s.innerHTML=""),v(({items:l})=>O(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Be(4),Vr(n),v(([f])=>f)))),m(Mn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(ne(l=>{let f=fe("details",l);return typeof 
f=="undefined"?S:h(f,"toggle").pipe(W(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(w(l=>o.next(l)),_(()=>o.complete()),m(l=>$({ref:e},l)))}function ms(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=ye();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function mi(e,t){let r=new g,o=r.pipe(Z(),ie(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(W(o)).subscribe(n=>n.preventDefault()),ms(e,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))}function fi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(h(n,"keydown"),h(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(He(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(w(s=>o.next(s)),_(()=>o.complete()),m(()=>({ref:e})))}function ui(e,{index$:t,keyboard$:r}){let o=xe();try{let n=ai(o.search,t),i=Se("search-query",e),a=Se("search-result",e);h(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Ie();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of P(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...P(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=pi(i,{worker$:n});return O(s,li(a,{worker$:n,query$:s})).pipe(Re(...ae("search-share",e).map(p=>mi(p,{query$:s})),...ae("search-suggest",e).map(p=>fi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function di(e,{index$:t,location$:r}){return z([t,r.pipe(Q(ye()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ii(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function fs(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Zr(e,o){var n=o,{header$:t}=n,r=so(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=Ve(i);return C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=s.pipe(Me(0,me));return 
c.pipe(re(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2})}}}),ue(P("label[tabindex]",e)).pipe(ne(l=>h(l,"click").pipe(ve(se),m(()=>l),W(p)))).subscribe(l=>{let f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),fs(e,r).pipe(w(l=>s.next(l)),_(()=>s.complete()),m(l=>$({ref:e},l)))})}function hi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(je(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),De({})),je(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return je(r).pipe(m(o=>({repositories:o.public_repos})),De({}))}}function bi(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(je(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),De({})),je(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}function vi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return hi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return bi(r,o)}return S}var us;function ds(e){return us||(us=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return vi(e.href).pipe(w(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function gi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(_n(o)),t.classList.add("md-source__repository--active")}),ds(e).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function hs(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),ee("hidden"))}function yi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):hs(e,t)).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function bs(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(ee("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(ee("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),He(i),v(([p,c])=>t.pipe(Fr(([l,f],{offset:{y:u},size:d})=>{let y=u+d.height>=Math.floor(s.height);for(;f.length;){let[,L]=f[0];if(L-c=u&&!y)f=[l.pop(),...f];else 
break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(Z(),ie(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),He(o.pipe(ve(se))),re(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(W(a),ee("offset"),_e(250),Ce(1),W(n.pipe(Ce(1))),ct({delay:250}),re(i)).subscribe(([,{prev:s}])=>{let p=ye(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),bs(e,{viewport$:t,header$:r}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function vs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),W(o.pipe(Ce(1))),ie(!0),ct({delay:250}),m(a=>({hidden:a})))}function Ei(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(W(a),ee("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),h(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),vs(e,{viewport$:t,main$:o,target$:n}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))}function wi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),ne(r=>tt(r).pipe(W(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(W(e.pipe(Ce(1))),_(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),ne(r=>mt(r,{viewport$:t}))).subscribe()}function Ti({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),w(r=>{r.indeterminate=!0,r.checked=!1}),ne(r=>h(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),re(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function gs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Si({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),w(t=>t.removeAttribute("data-md-scrollfix")),b(gs),ne(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Oi({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),re(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of 
Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ys(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",eo.base)}`).pipe(m(()=>__index),G(1)):je(new URL("search/search_index.json",eo.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Go(),Ut=sn(),Lt=ln(Ut),to=an(),Oe=gn(),hr=Pt("(min-width: 960px)"),Mi=Pt("(min-width: 1220px)"),_i=mn(),eo=xe(),Ai=document.forms.namedItem("search")?ys():Ye,ro=new g;Zn({alert$:ro});var oo=new g;B("navigation.instant")&&oi({location$:Ut,viewport$:Oe,progress$:oo}).subscribe(ot);var Li;((Li=eo.version)==null?void 0:Li.provider)==="mike"&&ci({document$:ot});O(Ut,Lt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});to.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&<(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&<(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});wi({viewport$:Oe,document$:ot});Ti({document$:ot,tablet$:hr});Si({document$:ot});Oi({viewport$:Oe,tablet$:hr});var rt=Kn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Gn(e,{viewport$:Oe,header$:rt})),G(1)),xs=O(...ae("consent").map(e=>En(e,{target$:Lt})),...ae("dialog").map(e=>qn(e,{alert$:ro})),...ae("palette").map(e=>Jn(e)),...ae("progress").map(e=>Xn(e,{progress$:oo})),...ae("search").map(e=>ui(e,{index$:Ai,keyboard$:to})),...ae("source").map(e=>gi(e))),Es=C(()=>O(...ae("announce").map(e=>xn(e)),...ae("content").map(e=>zn(e,{viewport$:Oe,target$:Lt,print$:_i})),...ae("content").map(e=>B("search.highlight")?di(e,{index$:Ai,location$:Ut}):S),...ae("header").map(e=>Yn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("header-title").map(e=>Bn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Mi,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>yi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>xi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})),...ae("top").map(e=>Ei(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})))),Ci=ot.pipe(v(()=>Es),Re(xs),G(1));Ci.subscribe();window.document$=ot;window.location$=Ut;window.target$=Lt;window.keyboard$=to;window.viewport$=Oe;window.tablet$=hr;window.screen$=Mi;window.print$=_i;window.alert$=ro;window.progress$=oo;window.component$=Ci;})(); +//# sourceMappingURL=bundle.83f73b43.min.js.map + diff --git a/assets/javascripts/bundle.83f73b43.min.js.map b/assets/javascripts/bundle.83f73b43.min.js.map new file mode 100644 index 000000000..fe920b7d6 --- /dev/null +++ b/assets/javascripts/bundle.83f73b43.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", 
"node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/tslib/tslib.es6.mjs", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", 
"node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", 
"node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", 
"src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/findurl/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? 
html + str.substring(lastIndex, index)\n : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/******************************************************************************\nCopyright (c) Microsoft Corporation.\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n***************************************************************************** */\n/* global Reflect, Promise, SuppressedError, Symbol, Iterator */\n\nvar extendStatics = function(d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\n return extendStatics(d, b);\n};\n\nexport function __extends(d, b) {\n if (typeof b !== \"function\" && b !== null)\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\n}\n\nexport var __assign = function() {\n __assign = Object.assign || function __assign(t) {\n for (var s, i = 1, n = arguments.length; i < n; i++) {\n s = arguments[i];\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\n }\n return t;\n }\n return __assign.apply(this, arguments);\n}\n\nexport function __rest(s, e) {\n var t = {};\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\n t[p] = s[p];\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\n t[p[i]] = s[p[i]];\n }\n return t;\n}\n\nexport function __decorate(decorators, target, key, desc) {\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\n return c > 3 && r && Object.defineProperty(target, key, r), r;\n}\n\nexport function __param(paramIndex, decorator) {\n return function (target, key) { decorator(target, key, paramIndex); }\n}\n\nexport function __esDecorate(ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {\n function accept(f) { if (f !== void 0 && typeof f !== \"function\") throw new TypeError(\"Function expected\"); return f; }\n var kind = contextIn.kind, key = kind === \"getter\" ? \"get\" : kind === \"setter\" ? \"set\" : \"value\";\n var target = !descriptorIn && ctor ? contextIn[\"static\"] ? ctor : ctor.prototype : null;\n var descriptor = descriptorIn || (target ? Object.getOwnPropertyDescriptor(target, contextIn.name) : {});\n var _, done = false;\n for (var i = decorators.length - 1; i >= 0; i--) {\n var context = {};\n for (var p in contextIn) context[p] = p === \"access\" ? {} : contextIn[p];\n for (var p in contextIn.access) context.access[p] = contextIn.access[p];\n context.addInitializer = function (f) { if (done) throw new TypeError(\"Cannot add initializers after decoration has completed\"); extraInitializers.push(accept(f || null)); };\n var result = (0, decorators[i])(kind === \"accessor\" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);\n if (kind === \"accessor\") {\n if (result === void 0) continue;\n if (result === null || typeof result !== \"object\") throw new TypeError(\"Object expected\");\n if (_ = accept(result.get)) descriptor.get = _;\n if (_ = accept(result.set)) descriptor.set = _;\n if (_ = accept(result.init)) initializers.unshift(_);\n }\n else if (_ = accept(result)) {\n if (kind === \"field\") initializers.unshift(_);\n else descriptor[key] = _;\n }\n }\n if (target) Object.defineProperty(target, contextIn.name, descriptor);\n done = true;\n};\n\nexport function __runInitializers(thisArg, initializers, value) {\n var useValue = arguments.length > 2;\n for (var i = 0; i < initializers.length; i++) {\n value = useValue ? initializers[i].call(thisArg, value) : initializers[i].call(thisArg);\n }\n return useValue ? value : void 0;\n};\n\nexport function __propKey(x) {\n return typeof x === \"symbol\" ? 
x : \"\".concat(x);\n};\n\nexport function __setFunctionName(f, name, prefix) {\n if (typeof name === \"symbol\") name = name.description ? \"[\".concat(name.description, \"]\") : \"\";\n return Object.defineProperty(f, \"name\", { configurable: true, value: prefix ? \"\".concat(prefix, \" \", name) : name });\n};\n\nexport function __metadata(metadataKey, metadataValue) {\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\n}\n\nexport function __awaiter(thisArg, _arguments, P, generator) {\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\n return new (P || (P = Promise))(function (resolve, reject) {\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\n step((generator = generator.apply(thisArg, _arguments || [])).next());\n });\n}\n\nexport function __generator(thisArg, body) {\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === \"function\" ? Iterator : Object).prototype);\n return g.next = verb(0), g[\"throw\"] = verb(1), g[\"return\"] = verb(2), typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\n function verb(n) { return function (v) { return step([n, v]); }; }\n function step(op) {\n if (f) throw new TypeError(\"Generator is already executing.\");\n while (g && (g = 0, op[0] && (_ = 0)), _) try {\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\n if (y = 0, t) op = [op[0] & 2, t.value];\n switch (op[0]) {\n case 0: case 1: t = op; break;\n case 4: _.label++; return { value: op[1], done: false };\n case 5: _.label++; y = op[1]; op = [0]; continue;\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\n default:\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\n if (t[2]) _.ops.pop();\n _.trys.pop(); continue;\n }\n op = body.call(thisArg, _);\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\n }\n}\n\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n var desc = Object.getOwnPropertyDescriptor(m, k);\n if (!desc || (\"get\" in desc ? 
!m.__esModule : desc.writable || desc.configurable)) {\n desc = { enumerable: true, get: function() { return m[k]; } };\n }\n Object.defineProperty(o, k2, desc);\n}) : (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n o[k2] = m[k];\n});\n\nexport function __exportStar(m, o) {\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\n}\n\nexport function __values(o) {\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\n if (m) return m.call(o);\n if (o && typeof o.length === \"number\") return {\n next: function () {\n if (o && i >= o.length) o = void 0;\n return { value: o && o[i++], done: !o };\n }\n };\n throw new TypeError(s ? \"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\n}\n\nexport function __read(o, n) {\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\n if (!m) return o;\n var i = m.call(o), r, ar = [], e;\n try {\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\n }\n catch (error) { e = { error: error }; }\n finally {\n try {\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\n }\n finally { if (e) throw e.error; }\n }\n return ar;\n}\n\n/** @deprecated */\nexport function __spread() {\n for (var ar = [], i = 0; i < arguments.length; i++)\n ar = ar.concat(__read(arguments[i]));\n return ar;\n}\n\n/** @deprecated */\nexport function __spreadArrays() {\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\n r[k] = a[j];\n return r;\n}\n\nexport function __spreadArray(to, from, pack) {\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\n if (ar || !(i in from)) {\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\n ar[i] = from[i];\n }\n }\n return to.concat(ar || Array.prototype.slice.call(from));\n}\n\nexport function __await(v) {\n return this instanceof __await ? (this.v = v, this) : new __await(v);\n}\n\nexport function __asyncGenerator(thisArg, _arguments, generator) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\n return i = Object.create((typeof AsyncIterator === \"function\" ? AsyncIterator : Object).prototype), verb(\"next\"), verb(\"throw\"), verb(\"return\", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;\n function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }\n function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\n function fulfill(value) { resume(\"next\", value); }\n function reject(value) { resume(\"throw\", value); }\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\n}\n\nexport function __asyncDelegator(o) {\n var i, p;\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? 
{ value: __await(o[n](v)), done: false } : f ? f(v) : v; } : f; }\n}\n\nexport function __asyncValues(o) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var m = o[Symbol.asyncIterator], i;\n return m ? m.call(o) : (o = typeof __values === \"function\" ? __values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\n}\n\nexport function __makeTemplateObject(cooked, raw) {\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\n return cooked;\n};\n\nvar __setModuleDefault = Object.create ? (function(o, v) {\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\n}) : function(o, v) {\n o[\"default\"] = v;\n};\n\nexport function __importStar(mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\n __setModuleDefault(result, mod);\n return result;\n}\n\nexport function __importDefault(mod) {\n return (mod && mod.__esModule) ? mod : { default: mod };\n}\n\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\n}\n\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\n}\n\nexport function __classPrivateFieldIn(state, receiver) {\n if (receiver === null || (typeof receiver !== \"object\" && typeof receiver !== \"function\")) throw new TypeError(\"Cannot use 'in' operator on non-object\");\n return typeof state === \"function\" ? 
receiver === state : state.has(receiver);\n}\n\nexport function __addDisposableResource(env, value, async) {\n if (value !== null && value !== void 0) {\n if (typeof value !== \"object\" && typeof value !== \"function\") throw new TypeError(\"Object expected.\");\n var dispose, inner;\n if (async) {\n if (!Symbol.asyncDispose) throw new TypeError(\"Symbol.asyncDispose is not defined.\");\n dispose = value[Symbol.asyncDispose];\n }\n if (dispose === void 0) {\n if (!Symbol.dispose) throw new TypeError(\"Symbol.dispose is not defined.\");\n dispose = value[Symbol.dispose];\n if (async) inner = dispose;\n }\n if (typeof dispose !== \"function\") throw new TypeError(\"Object not disposable.\");\n if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };\n env.stack.push({ value: value, dispose: dispose, async: async });\n }\n else if (async) {\n env.stack.push({ async: true });\n }\n return value;\n}\n\nvar _SuppressedError = typeof SuppressedError === \"function\" ? SuppressedError : function (error, suppressed, message) {\n var e = new Error(message);\n return e.name = \"SuppressedError\", e.error = error, e.suppressed = suppressed, e;\n};\n\nexport function __disposeResources(env) {\n function fail(e) {\n env.error = env.hasError ? new _SuppressedError(e, env.error, \"An error was suppressed during disposal.\") : e;\n env.hasError = true;\n }\n var r, s = 0;\n function next() {\n while (r = env.stack.pop()) {\n try {\n if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);\n if (r.dispose) {\n var result = r.dispose.call(r.value);\n if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });\n }\n else s |= 1;\n }\n catch (e) {\n fail(e);\n }\n }\n if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();\n if (env.hasError) throw env.error;\n }\n return next();\n}\n\nexport default {\n __extends,\n __assign,\n __rest,\n __decorate,\n __param,\n __metadata,\n __awaiter,\n __generator,\n __createBinding,\n __exportStar,\n __values,\n __read,\n __spread,\n __spreadArrays,\n __spreadArray,\n __await,\n __asyncGenerator,\n __asyncDelegator,\n __asyncValues,\n __makeTemplateObject,\n __importStar,\n __importDefault,\n __classPrivateFieldGet,\n __classPrivateFieldSet,\n __classPrivateFieldIn,\n __addDisposableResource,\n __disposeResources,\n};\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
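As a usage note for the two handlers documented here, a minimal sketch of wiring them up through the exported `config` object; both are invoked asynchronously on a separate job, as the comments above state.

```ts
import { config } from 'rxjs';

// Invoked on a separate job for errors no subscriber handled.
config.onUnhandledError = (err) => {
  console.warn('unhandled RxJS error:', err);
};

// Invoked for notifications that reach an already-stopped subscriber
// (a no-op by default).
config.onStoppedNotification = (notification) => {
  console.warn(`dropped "${notification.kind}" notification sent to a stopped subscriber`);
};
```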
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
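From the consumer's point of view, the effect of `reportUnhandledError` above is that an unhandled subscription error never surfaces synchronously at the `subscribe` call site. A rough sketch, assuming the default configuration:

```ts
import { throwError } from 'rxjs';

try {
  // No error callback supplied: the error is handed to reportUnhandledError,
  // which rethrows it later on a timer job (or calls config.onUnhandledError).
  throwError(() => new Error('boom')).subscribe();
} catch {
  console.log('never reached: the error is not thrown synchronously');
}
```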
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
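For contrast with the default path above, a sketch of the deprecated synchronous mode that `errorContext`/`captureError` exist to support. This is the "wrap subscribe in try/catch" pattern the config comments warn against, shown only to illustrate what the flag changes:

```ts
import { config, throwError } from 'rxjs';

// Deprecated migration aid; will be removed in v8.
config.useDeprecatedSynchronousErrorHandling = true;

try {
  throwError(() => new Error('boom')).subscribe();
} catch (err) {
  // With the flag enabled, the captured error is rethrown synchronously
  // once the subscribe call returns.
  console.log('caught synchronously:', (err as Error).message);
}
```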
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
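A small sketch of the `isStopped` guard implemented by `next`/`error`/`complete` above: once a subscriber has terminated, later notifications are dropped (or routed to `config.onStoppedNotification` if configured).

```ts
import { Observable } from 'rxjs';

const source$ = new Observable<number>((subscriber) => {
  subscriber.next(1);
  subscriber.complete();
  subscriber.next(2); // ignored: the subscriber is already stopped
});

source$.subscribe({
  next: (v) => console.log('next', v), // logs: next 1
  complete: () => console.log('done'), // logs: done
});
```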
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
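One consequence of the `ConsumerObserver` wrapping above, sketched under the assumption of RxJS 7 defaults: an exception thrown inside a consumer's `next` callback is caught and reported as an unhandled error rather than tearing down the subscription.

```ts
import { of } from 'rxjs';

of(1, 2, 3).subscribe({
  next: (v) => {
    if (v === 2) throw new Error('bad value'); // reported asynchronously
    console.log(v);
  },
});
// Logs 1, then 3; the error for 2 goes through the unhandled-error path.
```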
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
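Although `pipe` is usually fed RxJS operators, the standalone helper defined here composes any unary functions left to right; a minimal sketch:

```ts
import { pipe } from 'rxjs';

// Zero functions returns identity, one returns that function unchanged,
// otherwise the input is reduced through each function in turn.
const addOneThenDouble = pipe(
  (x: number) => x + 1,
  (x: number) => x * 2
);

console.log(addOneThenDouble(3)); // 8
```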
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
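A short sketch of the constructor contract documented above: the subscriber function receives a `Subscriber`, and whatever it returns is used as teardown logic when the subscription ends.

```ts
import { Observable } from 'rxjs';

const ticks$ = new Observable<number>((subscriber) => {
  let i = 0;
  const id = setInterval(() => subscriber.next(i++), 1000);
  // Teardown: runs on unsubscribe, error, or complete.
  return () => clearInterval(id);
});

const sub = ticks$.subscribe((n) => console.log(n));
setTimeout(() => sub.unsubscribe(), 3500); // logs 0, 1, 2, then stops
```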
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
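Since `toPromise` is deprecated in favor of `firstValueFrom` and `lastValueFrom`, a brief migration sketch:

```ts
import { of, firstValueFrom, lastValueFrom } from 'rxjs';

async function demo() {
  const first = await firstValueFrom(of(1, 2, 3)); // 1
  const last = await lastValueFrom(of(1, 2, 3));   // 3
  console.log(first, last);
}
demo();
// Both reject if the source errors; on an empty source they reject with
// EmptyError instead of resolving with undefined the way toPromise() did.
```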
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
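The `operate`/`OperatorSubscriber` helpers above are internal; the equivalent public-API shape for a custom operator, as the `lift` deprecation note recommends, is simply a function returning a new `Observable`. A minimal sketch with a hypothetical `double` operator:

```ts
import { Observable, OperatorFunction, of } from 'rxjs';

function double(): OperatorFunction<number, number> {
  return (source) =>
    new Observable<number>((subscriber) =>
      source.subscribe({
        next: (value) => subscriber.next(value * 2),
        error: (err) => subscriber.error(err),
        complete: () => subscriber.complete(),
      })
    );
}

of(1, 2, 3).pipe(double()).subscribe((v) => console.log(v)); // 2, 4, 6
```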
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
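The `animationFrameProvider.schedule` logic that begins above boils down to wrapping `requestAnimationFrame` in a `Subscription` whose teardown cancels an unfired frame. A rough standalone sketch (browser environment assumed; `scheduleFrame` is a hypothetical helper name, not part of the library):

```ts
import { Subscription } from 'rxjs';

function scheduleFrame(callback: FrameRequestCallback): Subscription {
  let handle: number | undefined = requestAnimationFrame((timestamp) => {
    handle = undefined; // the frame fired; nothing left to cancel
    callback(timestamp);
  });
  return new Subscription(() => {
    if (handle !== undefined) {
      cancelAnimationFrame(handle);
    }
  });
}

const frame = scheduleFrame((ts) => console.log('frame at', ts));
// Calling frame.unsubscribe() before the next repaint cancels the request.
```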
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
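A quick sketch of the multicast behavior this `Subject` class provides, including the `ObjectUnsubscribedError` thrown after `unsubscribe`:

```ts
import { Subject } from 'rxjs';

const subject = new Subject<number>();

subject.subscribe((v) => console.log('A', v));
subject.subscribe((v) => console.log('B', v));

subject.next(1); // logs: A 1, B 1
subject.complete();

subject.unsubscribe(); // closes the Subject itself
// subject.next(2); // would now throw ObjectUnsubscribedError
```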
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
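To illustrate the `bufferSize`/`windowTime` configuration described above, a small `ReplaySubject` sketch:

```ts
import { ReplaySubject } from 'rxjs';

// Keep at most 3 values, and drop any value older than 2 seconds.
const replay$ = new ReplaySubject<number>(3, 2000);

replay$.next(1);
replay$.next(2);
replay$.next(3);
replay$.next(4); // 1 is trimmed: the buffer is size-bounded to 3

replay$.subscribe((v) => console.log(v)); // synchronously replays 2, 3, 4
```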
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an + + + + \ No newline at end of file diff --git a/chrome-extension/features/index.html b/chrome-extension/features/index.html new file mode 100644 index 000000000..f2c0c5d20 --- /dev/null +++ b/chrome-extension/features/index.html @@ -0,0 +1,2209 @@ + + + + + + + + + + + + + + + + + + + + + + + Features - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Features

+ +

PR chat

+

The PR-Chat feature allows you to chat freely with your PR code, within your GitHub environment. +It will seamlessly use the PR as context for your chat session and provide AI-powered feedback.

+

To enable private chat, simply install the Qodo Merge Chrome extension. After installation, each PR's 'Files changed' tab will include a chat box, where you can ask questions about your code. +This chat session is private, and won't be visible to other users.

+

All open-source repositories are supported. +For private repositories, you will also need to install Qodo Merge Pro. After installation, make sure to open at least one new PR to fully register your organization. Once done, you can chat with both new and existing PRs across all installed repositories.

+

Context-aware PR chat

+

Qodo Merge constructs a comprehensive context for each pull request, incorporating the PR description, commit messages, and code changes with extended dynamic context. This contextual information, along with additional PR-related data, forms the foundation for an AI-powered chat session. The agent then leverages this rich context to provide intelligent, tailored responses to user inquiries about the pull request.

+

+

+

Toolbar extension

+

With the Qodo Merge Chrome extension, it's easier than ever to interactively configure and experiment with the different tools and configuration options.

+

For private repositories, once you have found the setup that works for you, you can easily export it as a persistent configuration file and use it for automatic commands.

+

+

+

Qodo Merge filters

+

Qodo Merge filters is a sidepanel option that allows you to filter the different messages in the conversation tab.

+

For example, you can choose to present only messages from Qodo Merge, or filter those messages out, focusing only on users' comments.

+

+

+

Enhanced code suggestions

+

The Qodo Merge Chrome extension adds the following capabilities to the code suggestions tool's comments:

+
    +
  • Auto-expanding the table when you are viewing a code block, to avoid clipping.
  • +
  • Adding a "quote-and-reply" button that lets you address and comment on a specific suggestion (for example, asking the author to fix the issue).
  • +
+

+

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/chrome-extension/index.html b/chrome-extension/index.html new file mode 100644 index 000000000..3f9a208c1 --- /dev/null +++ b/chrome-extension/index.html @@ -0,0 +1,2087 @@ + + + + + + + + + + + + + + + + + + + + + + + Qodo Merge Chrome Extension - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Qodo Merge Chrome Extension

+ +

The Qodo Merge Chrome extension is a collection of tools that integrate seamlessly with your GitHub environment, enhancing your Git usage experience and providing AI-powered capabilities for your PRs.

+

With a single-click installation, you will gain access to a context-aware chat on your pull request's code, a toolbar extension with multiple AI feedback options, Qodo Merge filters, and additional abilities.

+

The extension is powered by top code models like Claude 3.5 Sonnet and GPT-4. All the extension's features are free to use on public repositories.

+

For private repositories, you will need to install Qodo Merge Pro in addition to the extension (Quick GitHub app setup with a 14-day free trial. No credit card needed). +For a demonstration of how to install Qodo Merge Pro and use it with the Chrome extension, please refer to the tutorial video at the provided link.

+

+

Supported browsers

+

The extension is supported on all Chromium-based browsers, including Google Chrome, Arc, Opera, Brave, and Microsoft Edge.

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core-abilities/code_oriented_yaml/index.html b/core-abilities/code_oriented_yaml/index.html new file mode 100644 index 000000000..a1be6acb5 --- /dev/null +++ b/core-abilities/code_oriented_yaml/index.html @@ -0,0 +1,2128 @@ + + + + + + + + + + + + + + + + + + + + + + + Code-oriented YAML - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Code-oriented YAML

+ +

Overview

+

TBD

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core-abilities/compression_strategy/index.html b/core-abilities/compression_strategy/index.html new file mode 100644 index 000000000..8a7ae1ae8 --- /dev/null +++ b/core-abilities/compression_strategy/index.html @@ -0,0 +1,2215 @@ + + + + + + + + + + + + + + + + + + + + + + + Compression strategy - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Compression strategy

+ +

Overview - PR Compression Strategy

+

There are two scenarios:

+
    +
  1. The PR is small enough to fit in a single prompt (including system and user prompt)
  2. The PR is too large to fit in a single prompt (including system and user prompt)
+

For both scenarios, we first apply the following strategy:

+

Repo language prioritization strategy

+

We prioritize the languages of the repo based on the following criteria (a minimal sketch follows the list):

+
    +
  1. Exclude binary files and non-code files (e.g. images, PDFs, etc.)
  2. Given the main languages used in the repo, sort the PR files by the most common languages in the repo (in descending order):
     [[file.py, file2.py], [file3.js, file4.jsx], [readme.md]]
+
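For illustration only, here is a minimal Python sketch of this prioritization step; the helper name, the extension-to-language map, and the repo language shares are assumptions, not the actual pr-agent implementation:

from collections import defaultdict

# Hypothetical extension-to-language map; the real tool derives languages from the repo.
EXT_TO_LANG = {".py": "Python", ".js": "JavaScript", ".jsx": "JavaScript", ".md": "Markdown"}
EXCLUDED_EXTS = {".png", ".jpg", ".pdf", ".bin"}   # binary / non-code files

def sort_files_by_main_languages(repo_languages, pr_files):
    # Group the PR files by language, then order the groups by how common
    # each language is in the repo (descending).
    groups = defaultdict(list)
    for name in pr_files:
        ext = "." + name.rsplit(".", 1)[-1].lower()
        if ext in EXCLUDED_EXTS:
            continue
        groups[EXT_TO_LANG.get(ext, "Other")].append(name)
    ordered = sorted(groups.items(), key=lambda kv: repo_languages.get(kv[0], 0), reverse=True)
    return [files for _lang, files in ordered]

# Example, with made-up repo language shares:
print(sort_files_by_main_languages(
    {"Python": 0.70, "JavaScript": 0.25},
    ["file.py", "file2.py", "file3.js", "file4.jsx", "readme.md"]))
# -> [['file.py', 'file2.py'], ['file3.js', 'file4.jsx'], ['readme.md']]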

Small PR

+

In this case, we can fit the entire PR in a single prompt: +1. Exclude binary files and non-code files (e.g. images, PDFs, etc.) +2. Expand the surrounding context of each patch to 3 lines above and below the patch

+

Large PR

+

Motivation

+

Pull Requests can be very long and contain a lot of information with varying degrees of relevance to the pr-agent. +We want to pack as much information as possible into a single LLM prompt, while keeping the information relevant to the pr-agent.

+

Compression strategy

+

We prioritize additions over deletions: + - Combine all deleted files into a single list (deleted files) + - File patches are a list of hunks; remove all deletion-only hunks from each file patch

+

Adaptive and token-aware file patch fitting

+

We use tiktoken to tokenize the patches after the modifications described above, and we use the following strategy to fit the patches into the prompt (a minimal sketch follows the list):

+
    +
  1. Within each language, we sort the files by the number of tokens in the file (in descending order):
     • [[file2.py, file.py], [file4.jsx, file3.js], [readme.md]]
  2. Iterate through the patches in the order described above.
  3. Add the patches to the prompt until the prompt reaches a certain buffer from the max token length.
  4. If there are still patches left, add the remaining patches as a list called "other modified files" to the prompt, until the prompt reaches the max token length (hard stop); skip the rest of the patches.
  5. If we haven't reached the max token length, add the deleted files to the prompt, until the prompt reaches the max token length (hard stop); skip the rest of the patches.
+
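A minimal Python sketch of this token-aware fitting loop, assuming tiktoken's cl100k_base encoding and illustrative limits (MAX_TOKENS, OUTPUT_BUFFER); the real implementation and its thresholds may differ:

import tiktoken

enc = tiktoken.get_encoding("cl100k_base")
MAX_TOKENS = 32_000           # assumed model context limit for this sketch
OUTPUT_BUFFER = 1_000         # soft buffer kept below the hard limit

def fit_patches(sorted_patches, deleted_files):
    # sorted_patches: list of (filename, patch_text), already ordered by language
    # and by token count (descending), as described in the list above.
    prompt_parts, other_modified_files, used = [], [], 0
    for filename, patch in sorted_patches:
        n_tokens = len(enc.encode(patch))
        if used + n_tokens < MAX_TOKENS - OUTPUT_BUFFER:
            prompt_parts.append(patch)              # the full patch fits the soft budget
            used += n_tokens
        elif used < MAX_TOKENS:
            other_modified_files.append(filename)   # list only the filename, no content
            used += len(enc.encode(filename))
        else:
            break                                   # hard stop: skip the remaining patches
    if used < MAX_TOKENS and deleted_files:
        prompt_parts.append("deleted files: " + ", ".join(deleted_files))
    return "\n".join(prompt_parts), other_modified_files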

Example

+

Core Abilities

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core-abilities/dynamic_context/index.html b/core-abilities/dynamic_context/index.html new file mode 100644 index 000000000..7ea2a1484 --- /dev/null +++ b/core-abilities/dynamic_context/index.html @@ -0,0 +1,2256 @@ + + + + + + + + + + + + + + + + + + + + + + + Dynamic context - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Dynamic context

+ +

TL;DR

+

Qodo Merge uses an asymmetric and dynamic context strategy to improve AI analysis of code changes in pull requests. +It provides more context before changes than after, and dynamically adjusts the context based on code structure (e.g., enclosing functions or classes). +This approach balances providing sufficient context for accurate analysis, while avoiding needle-in-the-haystack information overload that could degrade AI performance or exceed token limits.

+

Introduction

+

Pull request code changes are retrieved in a unified diff format, showing three lines of context before and after each modified section, with additions marked by '+' and deletions by '-'. +

@@ -12,5 +12,5 @@ def func1():
+ code line that already existed in the file...
+ code line that already existed in the file...
+ code line that already existed in the file....
+-code line that was removed in the PR
++new code line added in the PR
+ code line that already existed in the file...
+ code line that already existed in the file...
+ code line that already existed in the file...
+
+@@ -26,2 +26,4 @@ def func2():
+...
+

+

This unified diff format can be challenging for AI models to interpret accurately, as it provides limited context for understanding the full scope of code changes. +The presentation of code using '+', '-', and ' ' symbols to indicate additions, deletions, and unchanged lines respectively also differs from the standard code formatting typically used to train AI models.

+

Challenges of expanding the context window

+

While expanding the context window is technically feasible, it presents a more fundamental trade-off:

+

Pros:

+
    +
  • Enhanced context allows the model to better comprehend and localize the code changes, resulting (potentially) in more precise analysis and suggestions. Without enough context, the model may struggle to understand the code changes and provide relevant feedback.
  • +
+

Cons:

+
    +
  • +

    Excessive context may overwhelm the model with extraneous information, creating a "needle in a haystack" scenario where focusing on the relevant details (the code that actually changed) becomes challenging. +LLM quality is known to degrade when the context gets larger. +Pull requests often encompass multiple changes across many files, potentially spanning hundreds of lines of modified code. This complexity presents a genuine risk of overwhelming the model with excessive context.

    +
  • +
  • +

    Increased context expands the token count, increasing processing time and cost, and may prevent the model from processing the entire pull request in a single pass.

    +
  • +
+

Asymmetric and dynamic context

+

To address these challenges, Qodo Merge employs an asymmetric and dynamic context strategy, providing the model with more focused and relevant context information for each code change.

+

Asymmetric:

+

We start by recognizing that the context preceding a code change is typically more crucial for understanding the modification than the context following it. +Consequently, Qodo Merge implements an asymmetric context policy, decoupling the context window into two distinct segments: one for the code before the change and another for the code after.

+

By independently adjusting each context window, Qodo Merge can supply the model with a more tailored and pertinent context for individual code changes.

+

Dynamic:

+

We also employ a "dynamic" context strategy. +We start by recognizing that the optimal context for a code change often corresponds to its enclosing code component (e.g., function, class), rather than a fixed number of lines. +Consequently, we dynamically adjust the context window based on the code's structure, ensuring the model receives the most pertinent information for each modification.

+

To prevent overwhelming the model with excessive context, we impose a limit on the number of lines searched when identifying the enclosing component. +This balance allows for comprehensive understanding while maintaining efficiency and limiting context token usage.

+

Appendix - relevant configuration options

+
[config]
+patch_extension_skip_types =[".md",".txt"]  # Skip files with these extensions when trying to extend the context
+allow_dynamic_context=true                  # Allow dynamic context extension
+max_extra_lines_before_dynamic_context = 8  # will try to include up to X extra lines before the hunk in the patch, until we reach an enclosing function or class
+patch_extra_lines_before = 3                # Number of extra lines (+3 default ones) to include before each hunk in the patch
+patch_extra_lines_after = 1                 # Number of extra lines (+3 default ones) to include after each hunk in the patch
+
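For illustration, here is a minimal Python sketch of how dynamic context extension could work for a Python file, driven by the options above; the enclosing-scope heuristic (a simple regex for def/class headers) and the helper name are assumptions, not the actual implementation:

import re

def extend_hunk_context(file_lines, hunk_start, patch_extra_lines_before=3,
                        max_extra_lines_before_dynamic_context=8):
    # Return the index of the first context line to include before a hunk.
    # Start from the default fixed context, then walk further back looking for
    # an enclosing 'def'/'class' line, up to a hard cap on extra lines.
    start = max(0, hunk_start - patch_extra_lines_before)
    cap = max(0, hunk_start - max_extra_lines_before_dynamic_context)
    for i in range(start, cap - 1, -1):
        if re.match(r"^\s*(def |class )", file_lines[i]):
            return i          # found an enclosing function/class header
    return start              # fall back to the fixed asymmetric context

# Usage sketch: for a hunk starting at line 42 of source_lines,
# first_context_line = extend_hunk_context(source_lines, 42)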
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core-abilities/impact_evaluation/index.html b/core-abilities/impact_evaluation/index.html new file mode 100644 index 000000000..6fb20ad93 --- /dev/null +++ b/core-abilities/impact_evaluation/index.html @@ -0,0 +1,2235 @@ + + + + + + + + + + + + + + + + + + + + + + + Impact evaluation - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Overview - Impact Evaluation 💎

+

Demonstrating the return on investment (ROI) of AI-powered initiatives is crucial for modern organizations. +To address this need, Qodo Merge has developed AI impact measurement tools and metrics, providing advanced analytics to help businesses quantify the tangible benefits of AI adoption in their PR review process.

+

Auto Impact Validator - Real-Time Tracking of Implemented Qodo Merge Suggestions

+

How It Works

+

When a user pushes a new commit to the pull request, Qodo Merge automatically compares the updated code against the previous suggestions, marking them as implemented if the changes address these recommendations, whether directly or indirectly:

+
    +
  1. Direct Implementation: The user directly addresses the suggestion as-is in the PR, either by clicking on the "apply code suggestion" checkbox or by making the changes manually.
  2. Indirect Implementation: Qodo Merge recognizes when a suggestion's intent is fulfilled, even if the exact code changes differ from the original recommendation. It marks these suggestions as implemented, acknowledging that users may achieve the same goal through alternative solutions.
+

Real-Time Visual Feedback

+

Upon confirming that a suggestion was implemented, Qodo Merge automatically adds a ✅ (check mark) to the relevant suggestion, enabling transparent tracking of Qodo Merge's impact analysis. +Qodo Merge will also add, inside the relevant suggestions, an explanation of how the new code was impacted by each suggestion.

+

Suggestion_checkmark

+

Dashboard Metrics

+

The dashboard provides macro-level insights into the overall impact of Qodo Merge on the pull-request process with key productivity metrics.

+

By offering clear, data-driven evidence of Qodo Merge's impact, it empowers leadership teams to make informed decisions about the tool's effectiveness and ROI.

+

Here are key metrics that the dashboard tracks:

+

Qodo Merge Impacts per 1K Lines

+

Dashboard

+
+

Explanation: for every 1K lines of code (additions/edits), Qodo Merge had on average ~X suggestions implemented.

+
+
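As a purely hypothetical worked example (all numbers invented for illustration): if PRs in a given period added 28,000 lines of code and 42 Qodo Merge suggestions were marked as implemented, the metric would be 42 / 28 = 1.5 impacts per 1K lines.

# Hypothetical numbers, for illustration only
implemented_suggestions = 42
lines_added = 28_000
impacts_per_1k = implemented_suggestions / (lines_added / 1000)   # -> 1.5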

Why This Metric Matters:

+
    +
  1. Standardized and Comparable Measurement: By measuring impacts per 1K lines of code additions, you create a standardized metric that can be compared across different projects, teams, customers, and time periods. This standardization is crucial for meaningful analysis, benchmarking, and identifying where Qodo Merge is most effective.
  2. Accounts for PR Variability and Incentivizes Quality: This metric addresses the fact that "Not all PRs are created equal." By normalizing against lines of code rather than PR count, you account for the variability in PR sizes and focus on the quality and impact of suggestions rather than just the number of PRs affected.
  3. Quantifies Value and ROI: The metric directly correlates with the value Qodo Merge is providing, showing how frequently it offers improvements relative to the amount of new code being written. This provides a clear, quantifiable way to demonstrate Qodo Merge's return on investment to stakeholders.
+

Suggestion Effectiveness Across Categories

+

Impacted_Suggestion_Score

+
+

Explanation: This chart illustrates the distribution of implemented suggestions across different categories, enabling teams to better understand Qodo Merge's impact on various aspects of code quality and development practices.

+
+

Suggestion Score Distribution

+

Impacted_Suggestion_Score

+
+

Explanation: The distribution of the suggestion score for the implemented suggestions, ensuring that higher-scored suggestions truly represent more significant improvements.

+
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core-abilities/index.html b/core-abilities/index.html new file mode 100644 index 000000000..06c979936 --- /dev/null +++ b/core-abilities/index.html @@ -0,0 +1,2142 @@ + + + + + + + + + + + + + + + + + + + + + + + Core Abilities - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Core Abilities

+

Qodo Merge utilizes a variety of core abilities to provide a comprehensive and efficient code review experience. These abilities include:

+ +

Blogs

+

Here are some additional technical blogs from Qodo that delve deeper into the core capabilities and features of Large Language Models (LLMs) when applied to coding tasks. +These resources provide more comprehensive insights into leveraging LLMs for software development.

+

Code Generation and LLMs

+ +

Development Processes

+ +

Cost Optimization

+ + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core-abilities/interactivity/index.html b/core-abilities/interactivity/index.html new file mode 100644 index 000000000..ca8377d3d --- /dev/null +++ b/core-abilities/interactivity/index.html @@ -0,0 +1,2128 @@ + + + + + + + + + + + + + + + + + + + + + + + Interactivity - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Interactivity

+ +

Interactive invocation 💎

+

TBD

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core-abilities/metadata/index.html b/core-abilities/metadata/index.html new file mode 100644 index 000000000..7c8f699ee --- /dev/null +++ b/core-abilities/metadata/index.html @@ -0,0 +1,2177 @@ + + + + + + + + + + + + + + + + + + + + + + + Local and global metadata - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Local and global metadata

+ +

Local and global metadata injection with multi-stage analysis

+

(1) +Qodo Merge initially retrieves the following data for each PR:

+
    +
  • PR title and branch name
  • +
  • PR original description
  • +
  • Commit messages history
  • +
  • PR diff patches, in hunk diff format
  • +
  • The entire content of the files that were modified in the PR
  • +
+
+

Tip: Organization-level metadata

+

In addition to the inputs above, Qodo Merge can incorporate supplementary preferences provided by the user, like extra_instructions and organization best practices. This information can be used to enhance the PR analysis.

+
+

(2) +By default, the first command that Qodo Merge executes is describe, which generates three types of outputs:

+
    +
  • PR Type (e.g. bug fix, feature, refactor, etc)
  • +
  • PR Description - a bullet point summary of the PR
  • +
  • Changes walkthrough - for each modified file, provide a one-line summary followed by a detailed bullet point list of the changes.
  • +
+

These AI-generated outputs are now considered part of the PR metadata, and can be used in subsequent commands like review and improve. +This effectively enables multi-stage chain-of-thought analysis, without any additional API calls that would cost time and money.

+

For example, when generating code suggestions for different files, Qodo Merge can inject the AI-generated "Changes walkthrough" file summary in the prompt:

+
## File: 'src/file1.py'
+### AI-generated file summary:
+- edited function `func1` that does X
+- Removed function `func2` that was not used
+- ....
+
+@@ ... @@ def func1():
+__new hunk__
+11  unchanged code line0 in the PR
+12  unchanged code line1 in the PR
+13 +new code line2 added in the PR
+14  unchanged code line3 in the PR
+__old hunk__
+ unchanged code line0
+ unchanged code line1
+-old code line2 removed in the PR
+ unchanged code line3
+
+@@ ... @@ def func2():
+__new hunk__
+...
+__old hunk__
+...
+
+

(3) The entire PR files that were retrieved are also used to expand and enhance the PR context (see Dynamic Context).

+

(4) All the metadata described above represents several levels of cumulative analysis - ranging from hunk level, to file level, to PR level, to organization level. +This comprehensive approach enables Qodo Merge's AI models to generate more precise and contextually relevant suggestions and feedback.

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core-abilities/self_reflection/index.html b/core-abilities/self_reflection/index.html new file mode 100644 index 000000000..33547bbe5 --- /dev/null +++ b/core-abilities/self_reflection/index.html @@ -0,0 +1,2237 @@ + + + + + + + + + + + + + + + + + + + + + + + Self-reflection - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Self-reflection

+ +

TL;DR

+

Qodo Merge implements a self-reflection process where the AI model reflects, scores, and re-ranks its own suggestions, eliminating irrelevant or incorrect ones. +This approach improves the quality and relevance of suggestions, saving users time and enhancing their experience. +Configuration options allow users to set a score threshold for further filtering out suggestions.

+

Introduction - Efficient Review with Hierarchical Presentation

+

Given that not all generated code suggestions will be relevant, it is crucial to enable users to review them in a fast and efficient way, allowing quick identification and filtering of non-applicable ones.

+

To achieve this goal, Qodo Merge offers a dedicated hierarchical structure when presenting suggestions to users:

+
    +
  • A "category" section groups suggestions by their category, allowing users to quickly dismiss irrelevant suggestions.
  • +
  • Each suggestion is first described by a one-line summary, which can be expanded to a full description by clicking on a collapsible.
  • +
  • Upon expanding a suggestion, the user receives a more comprehensive description, and a code snippet demonstrating the recommendation.
  • +
+
+

Fast Review

+

This hierarchical structure is designed to facilitate rapid review of each suggestion, with users spending an average of ~5-10 seconds per item.

+
+

Self-reflection and Re-ranking

+

The AI model is initially tasked with generating suggestions, and outputting them in order of importance. +However, in practice we observe that models often struggle to simultaneously generate high-quality code suggestions and rank them well in a single pass. +Furthermore, the initial set of generated suggestions sometimes contains easily identifiable errors.

+

To address these issues, we implemented a "self-reflection" process that refines suggestion ranking and eliminates irrelevant or incorrect proposals. +This process consists of the following steps:

+
    +
  1. Presenting the generated suggestions to the model in a follow-up call.
  2. Instructing the model to score each suggestion on a scale of 0-10 and provide a rationale for the assigned score.
  3. Utilizing these scores to re-rank the suggestions and filter out incorrect ones (with a score of 0).
  4. Optionally, filtering out all suggestions below a user-defined score threshold.
+

Note that presenting all generated suggestions simultaneously provides the model with a comprehensive context, enabling it to make more informed decisions compared to evaluating each suggestion individually.

+

To conclude, the self-reflection process enables Qodo Merge to prioritize suggestions based on their importance, eliminate inaccurate or irrelevant proposals, and optionally exclude suggestions that fall below a specified threshold of significance. +This results in a more refined and valuable set of suggestions for the user, saving time and improving the overall experience.

+

Example Results

+

self_reflection +self_reflection

+

Appendix - Relevant Configuration Options

+
[pr_code_suggestions]
+self_reflect_on_suggestions = true # Enable self-reflection on code suggestions
+suggestions_score_threshold = 0 # Filter out suggestions with a score below this threshold (0-10)
+
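A minimal Python sketch of the re-ranking and filtering step that these options control; the suggestion structure and field names are assumptions for illustration, not the actual implementation:

def rerank_and_filter(suggestions, suggestions_score_threshold=0):
    # suggestions: list of dicts with a 'score' (0-10) assigned during the
    # self-reflection call. Drop score-0 items, optionally apply the user
    # threshold, and re-order the rest by score (descending).
    kept = [s for s in suggestions
            if s["score"] > 0 and s["score"] >= suggestions_score_threshold]
    return sorted(kept, key=lambda s: s["score"], reverse=True)

# Example with hypothetical data:
# rerank_and_filter([{"summary": "a", "score": 7}, {"summary": "b", "score": 0}], 5)
# -> [{"summary": "a", "score": 7}]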
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core-abilities/static_code_analysis/index.html b/core-abilities/static_code_analysis/index.html new file mode 100644 index 000000000..b6625584c --- /dev/null +++ b/core-abilities/static_code_analysis/index.html @@ -0,0 +1,2288 @@ + + + + + + + + + + + + + + + + + + + + + + + Static code analysis - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Static code analysis

+ +

Overview - Static Code Analysis 💎

+

By combining static code analysis with LLM capabilities, Qodo Merge can provide a comprehensive analysis of the PR code changes on a component level.

+

It scans the PR code changes, finds all the code components (methods, functions, classes) that changed, and lets you interactively generate tests, docs, code suggestions, and similar-code searches for each component.

+
+
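As a rough illustration of the component-detection step for a Python file, here is a sketch using the standard ast module; the function name and approach are assumptions, and the actual implementation (which also covers the other supported languages) is not described here:

import ast

def changed_components(source, changed_lines):
    # Return the names of functions/classes whose body overlaps any changed line.
    tree = ast.parse(source)
    hits = []
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
            span = range(node.lineno, (node.end_lineno or node.lineno) + 1)
            if any(line in changed_lines for line in span):
                hits.append(node.name)
    return hits

# Usage sketch: changed_components(open("src/file1.py").read(), {12, 13, 14})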

Languages that are currently supported:

+

Python, Java, C++, JavaScript, TypeScript, C#.

+
+

Capabilities

+

Analyze PR

+

The analyze tool lets you interactively generate tests, docs, and code suggestions, and search for similar code, for each component that changed in the PR. It can be invoked manually by commenting on any PR:

/analyze
+

+

An example result:

+

Analyze 1

+

Clicking on each checkbox will trigger the relevant tool for the selected component.

+

Generate Tests

+

The test tool generates tests for a selected component, based on the PR code changes. It can be invoked manually by commenting on any PR:

/test component_name
+
where 'component_name' is the name of a specific component in the PR. It can also be triggered interactively by using the analyze tool.

+

test1

+

Generate Docs for a Component

+

The add_docs tool scans the PR code changes and automatically generates docstrings for any code components that changed in the PR. It can be invoked manually by commenting on any PR:

/add_docs component_name
+

+

Or be triggered interactively by using the analyze tool.

+

Docs single component

+

Generate Code Suggestions for a Component

+

The improve_component tool generates code suggestions for a specific code component that changed in the PR. It can be invoked manually by commenting on any PR:

/improve_component component_name
+

+

Or be triggered interactively by using the analyze tool.

+

improve_component2

+

Find Similar Code

+

The similar code tool retrieves the most similar code components from inside the organization's codebase, or from open-source code.

+

For example:

+

Global Search for a method called chat_completion:

+

similar code global

+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/css/custom.css b/css/custom.css new file mode 100644 index 000000000..356283186 --- /dev/null +++ b/css/custom.css @@ -0,0 +1,49 @@ + + +:root { + --md-primary-fg-color: #765bfa; + --md-accent-fg-color: #AEA1F1; + } + +.md-nav--primary { + .md-nav__link { + font-size: 18px; /* Change the font size as per your preference */ + } +} + +.md-nav--primary { + position: relative; /* Ensure the element is positioned */ +} + +.md-nav--primary::before { + content: ""; + position: absolute; + top: 0; + right: 10px; /* Move the border 10 pixels to the right */ + width: 2px; + height: 100%; + background-color: #f5f5f5; /* Match the border color */ +} +/*.md-nav__title, .md-nav__link {*/ +/* font-size: 18px;*/ +/* margin-top: 14px; !* Adjust the space as needed *!*/ +/* margin-bottom: 14px; !* Adjust the space as needed *!*/ +/*}*/ + +.md-tabs__link { + font-size: 18px; +} + +.md-header__title { + font-size: 20px; + margin-left: 0px !important; +} + +.md-content img { + border-width: 1px; + border-style: solid; + border-color: black; + outline-width: 1px; + outline-style: solid; + outline-color: darkgray; + } diff --git a/faq/index.html b/faq/index.html new file mode 100644 index 000000000..ed003f398 --- /dev/null +++ b/faq/index.html @@ -0,0 +1,2125 @@ + + + + + + + + + + + + + + + + + + + + + FAQ - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

FAQ

+
+Question: Can Qodo Merge serve as a substitute for a human reviewer? +

Answer:

+

Qodo Merge is designed to assist, not replace, human reviewers.

+

Reviewing PRs is a tedious and time-consuming task, often seen as a "chore". In addition, the longer the PR, the less relative feedback it tends to receive, since long PRs can overwhelm reviewers both in technical difficulty and in actual review time. Qodo Merge aims to address these pain points, and to assist and empower both the PR author and the reviewer.

+

However, Qodo Merge has built-in safeguards to ensure the developer remains in the driver's seat. For example:

+
    +
  1. Preserves user's original PR header
  2. +
  3. Places user's description above the AI-generated PR description
  4. +
  5. Cannot approve PRs; approval remains reviewer's responsibility
  6. +
  7. The code suggestions are optional, and aim to:
      +
    • Encourage self-review and self-reflection
    • +
    • Highlight potential bugs or oversights
    • +
    • Enhance code quality and promote best practices
    • +
    +
  8. +
+

Read more about this issue in our blog

+
+
+
+Question: I received an incorrect or irrelevant suggestion. Why? +

Answer:

+
    +
  • Modern AI models, like Claude 3.5 Sonnet and GPT-4, are improving rapidly but remain imperfect. Users should critically evaluate all suggestions rather than accepting them automatically.
  • +
  • +

AI errors are rare, but possible. A key value of reviewing the code suggestions lies in their high probability of catching mistakes or bugs made by the PR author. We believe it's worth spending 30-60 seconds reviewing suggestions, even if some aren't relevant, as this practice can enhance code quality and prevent bugs in production.

    +
  • +
  • +

The hierarchical structure of the suggestions is designed to help the user quickly understand them and decide which ones are relevant and which are not:

    +
      +
• Only if the category header is relevant should the user move on to the summarized suggestion description.
    • +
• Only if the summarized suggestion description is relevant should the user click on the collapsible to read the full suggestion description, with a code preview example.
    • +
    +
  • +
  • +

In addition, we recommend using the extra_instructions field to guide the model toward suggestions that are more relevant to the specific needs of the project.

    +
  • +
  • The interactive PR chat also provides an easy way to get more tailored suggestions and feedback from the AI model.
  • +
+
+
+
+Question: How can I get more tailored suggestions? +

Answer:

+

See here for more information on how to use the extra_instructions and best_practices configuration options, to guide the model to more tailored suggestions.
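As a minimal illustration, an extra_instructions entry for the improve tool might look like the following (the instruction text itself is just an example you would adapt to your project):

[pr_code_suggestions]
extra_instructions = "Focus on security issues and on our internal error-handling conventions."

A similar extra_instructions option is available for other tools, such as the review tool.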

+
+
+
+Question: Will you store my code ? Are you using my code to train models? +

Answer:

+

No. Qodo Merge's strict privacy policy ensures that your code is neither stored nor used for training purposes.

+

For a detailed overview of our data privacy policy, please refer to this link

+
+
+
+Question: Can I use my own LLM keys with Qodo Merge? +

Answer:

+

When you self-host, you use your own keys.

+

The SaaS deployment of Qodo Merge Pro is a hosted version of Qodo Merge, where Qodo manages the infrastructure and the keys. For enterprise customers, on-prem deployment is also available; contact us for more information.

+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/finetuning_benchmark/index.html b/finetuning_benchmark/index.html new file mode 100644 index 000000000..c6334e8dc --- /dev/null +++ b/finetuning_benchmark/index.html @@ -0,0 +1,2384 @@ + + + + + + + + + + + + + + + + + + + + + + + Code Fine-tuning Benchmark - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Qodo Merge Code Fine-tuning Benchmark

+

On coding tasks, the gap between open-source models and top closed-source models such as GPT-4 is significant.
In practice, open-source models are unsuitable for most real-world code tasks, and require further fine-tuning to produce acceptable results.

+

Qodo Merge fine-tuning benchmark aims to benchmark open-source models on their ability to be fine-tuned for a coding task. +Specifically, we chose to fine-tune open-source models on the task of analyzing a pull request, and providing useful feedback and code suggestions.

+

Here are the results: +
+

+

Model performance:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Model name | Model size [B] | Better than gpt-4 rate, after fine-tuning [%]
DeepSeek 34B-instruct | 34 | 40.7
DeepSeek 34B-base | 34 | 38.2
Phind-34b | 34 | 38
Granite-34B | 34 | 37.6
Codestral-22B-v0.1 | 22 | 32.7
QWEN-1.5-32B | 32 | 29
CodeQwen1.5-7B | 7 | 35.4
Llama-3.1-8B-Instruct | 8 | 35.2
Granite-8b-code-instruct | 8 | 34.2
CodeLlama-7b-hf | 7 | 31.8
Gemma-7B | 7 | 27.2
DeepSeek coder-7b-instruct | 7 | 26.8
Llama-3-8B-Instruct | 8 | 26.8
Mistral-7B-v0.1 | 7 | 16.1
+


+

Fine-tuning impact:

+ + + + + + + + + + + + + + + + + + + + + + + +
Model name | Model size [B] | Fine-tuned | Better than gpt-4 rate [%]
DeepSeek 34B-instruct | 34 | yes | 40.7
DeepSeek 34B-instruct | 34 | no | 3.6
+

Results analysis

+
    +
• Fine-tuning is a must - without fine-tuning, open-source models provide poor results on most real-world code tasks, which involve complicated prompts and lengthy context. We clearly see that without fine-tuning, the DeepSeek model was inferior to GPT-4 96.4% of the time, while after fine-tuning it is better 40.7% of the time.
  • +
• Always start from a code-dedicated model - when fine-tuning, start from a code-dedicated model rather than a general-usage model; the gaps in downstream results are very big.
  • +
• Don't believe the hype - newer models, or models from big-tech companies (Llama3, Gemma, Mistral), are not always better for fine-tuning.
  • +
  • The best large model - For large 34B code-dedicated models, the gaps when doing proper fine-tuning are small. The current top model is DeepSeek 34B-instruct
  • +
  • The best small model - For small 7B code-dedicated models, the gaps when fine-tuning are much larger. CodeQWEN 1.5-7B is by far the best model for fine-tuning.
  • +
• Base vs. instruct - For the top model (DeepSeek), we saw a small advantage when starting from the instruct version. However, we recommend testing both versions on each specific task, as the base model is generally considered more suitable for fine-tuning.
  • +
+

The dataset

+

Training dataset

+

Our training dataset comprises 25,000 pull requests, aggregated from permissive license repos. For each pull request, we generated responses for the three main tools of Qodo Merge: +Describe, Review and Improve.

+

On the raw data collected, we employed various automatic and manual cleaning techniques to ensure the outputs were of the highest quality, and suitable for instruct-tuning.

+

Here are the prompts, and example outputs, used as input-output pairs to fine-tune the models:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Tool | Prompt | Example output
Describe | link | link
Review | link | link
Improve | link | link
+

Evaluation dataset

+
    +
  • For each tool, we aggregated 100 additional examples to be used for evaluation. These examples were not used in the training dataset, and were manually selected to represent diverse real-world use-cases.
  • +
  • +

    For each test example, we generated two responses: one from the fine-tuned model, and one from the best code model in the world, gpt-4-turbo-2024-04-09.

    +
  • +
  • +

We used a third LLM to judge which response better answers the prompt and is more likely to be perceived by a human as the better response.

    +
  • +
+

We experimented with three models as judges: gpt-4-turbo-2024-04-09, gpt-4o, and claude-3-opus-20240229. All three produced similar results, with the same ranking order, which strengthens the validity of our testing protocol. The evaluation prompt can be found here

+

Here is an example of a judge model feedback:

+
command: improve
+model1_score: 9,
+model2_score: 6,
+why: |
+  Response 1 is better because it provides more actionable and specific suggestions that directly 
+  enhance the code's maintainability, performance, and best practices. For example, it suggests 
+  using a variable for reusable widget instances and using named routes for navigation, which 
+  are practical improvements. In contrast, Response 2 focuses more on general advice and less 
+  actionable suggestions, such as changing variable names and adding comments, which are less 
+  critical for immediate code improvement.
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 000000000..a56981280 --- /dev/null +++ b/index.html @@ -0,0 +1,2381 @@ + + + + + + + + + + + + + + + + + + + + + Overview - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Overview

+

Qodo Merge is an open-source tool to help efficiently review and handle pull requests.

+
    +
  • +

    See the Installation Guide for instructions on installing and running the tool on different git platforms.

    +
  • +
  • +

    See the Usage Guide for instructions on running the Qodo Merge commands via different interfaces, including CLI, online usage, or by automatically triggering them when a new PR is opened.

    +
  • +
  • +

    See the Tools Guide for a detailed description of the different tools.

    +
  • +
+ +

To search the documentation site using natural language:

+

1) Comment /help "your question" in either:

+
    +
  • A pull request where Qodo Merge is installed
  • +
  • A PR Chat
  • +
+

2) Qodo Merge will respond with an answer that includes relevant documentation links.

+

Qodo Merge Features

+

Qodo Merge offers extensive pull request functionalities across various git providers.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
GitHubGitlabBitbucketAzure DevOps
TOOLSReview
⮑ Incremental
Ask
Describe
Inline file summary 💎
Improve
⮑ Extended
Custom Prompt 💎
Reflect and Review
Update CHANGELOG.md
Find Similar Issue
Add PR Documentation 💎
Generate Custom Labels 💎
Analyze PR Components 💎
USAGECLI
App / webhook
Actions
COREPR compression
Repo language prioritization
Adaptive and token-aware file patch fitting
Multiple models support
Incremental PR review
Static code analysis 💎
Multiple configuration options 💎
+

💎 marks a feature available only in Qodo Merge Pro

+

Example Results

+
+ +

/describe

+
+

/describe

+
+
+ +

/review

+
+

/review

+
+
+ +

/improve

+
+

/improve

+
+
+ +

/generate_labels

+
+

/generate_labels

+
+
+ +

How it Works

+

The following diagram illustrates Qodo Merge tools and their flow:

+

Qodo Merge Tools

+

Check out the core abilities page for a comprehensive overview of the variety of core abilities used by Qodo Merge.

+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/installation/azure/index.html b/installation/azure/index.html new file mode 100644 index 000000000..f2253e16a --- /dev/null +++ b/installation/azure/index.html @@ -0,0 +1,2255 @@ + + + + + + + + + + + + + + + + + + + + + + + Azure DevOps - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Azure DevOps

+ +

Azure DevOps Pipeline

+

You can use a pre-built Action Docker image to run Qodo Merge as an Azure DevOps pipeline. Add the following file to your repository under azure-pipelines.yml:

# Opt out of CI triggers
+trigger: none
+
+# Configure PR trigger
+pr:
+  branches:
+    include:
+    - '*'
+  autoCancel: true
+  drafts: false
+
+stages:
+- stage: pr_agent
+  displayName: 'PR Agent Stage'
+  jobs:
+  - job: pr_agent_job
+    displayName: 'PR Agent Job'
+    pool:
+      vmImage: 'ubuntu-latest'
+    container:
+      image: codiumai/pr-agent:latest
+      options: --entrypoint ""
+    variables:
+      - group: pr_agent
+    steps:
+    - script: |
+        echo "Running PR Agent action step"
+
+        # Construct PR_URL
+        PR_URL="${SYSTEM_COLLECTIONURI}${SYSTEM_TEAMPROJECT}/_git/${BUILD_REPOSITORY_NAME}/pullrequest/${SYSTEM_PULLREQUEST_PULLREQUESTID}"
+        echo "PR_URL=$PR_URL"
+
+        # Extract organization URL from System.CollectionUri
+        ORG_URL=$(echo "$(System.CollectionUri)" | sed 's/\/$//') # Remove trailing slash if present
+        echo "Organization URL: $ORG_URL"
+
+        export azure_devops__org="$ORG_URL"
+        export config__git_provider="azure"
+
+        pr-agent --pr_url="$PR_URL" describe
+        pr-agent --pr_url="$PR_URL" review
+        pr-agent --pr_url="$PR_URL" improve
+      env:
+        azure_devops__pat: $(azure_devops_pat)
+        openai__key: $(OPENAI_KEY)
+      displayName: 'Run Qodo Merge'
+
This script will run Qodo Merge on every new pull request, with the improve, review, and describe commands. Note that you need to export the azure_devops__pat and OPENAI_KEY variables in the Azure DevOps pipeline settings (Pipelines -> Library -> Variable group):

+

Make sure to give pipeline permissions to the pr_agent variable group.

+

Azure DevOps from CLI

+

To use Azure DevOps provider use the following settings in configuration.toml: +

[config]
+git_provider="azure"
+

+

The Azure DevOps provider supports PAT token or DefaultAzureCredential authentication. A PAT is faster to create, but has a built-in expiration date and uses the user's identity for API calls. With DefaultAzureCredential you can use a managed identity or a service principal, which are more secure and create a separate ADO user identity (via AAD) for the agent.

+

If PAT was chosen, you can assign the value in .secrets.toml. If DefaultAzureCredential was chosen, you can assign the additional env vars like AZURE_CLIENT_SECRET directly, or use a managed identity / az cli (for local development) without any additional configuration. In any case, the 'org' value must be assigned in .secrets.toml:

[azure_devops]
+org = "https://dev.azure.com/YOUR_ORGANIZATION/"
+# pat = "YOUR_PAT_TOKEN" needed only if using PAT for authentication
+

+

Azure DevOps Webhook

+

To trigger from an Azure webhook, you need to manually add a webhook. Use the "Pull request created" type to trigger a review, or "Pull request commented on" to trigger any supported command comment (starting with /) on the relevant PR. Note that for the "Pull request commented on" trigger, only API v2.0 is supported.

+

For webhook security, create a dedicated username/password pair and configure it on both the server and the Azure DevOps webhook. These credentials will be sent as basic auth data by the webhook with each request:

[azure_devops_server]
+webhook_username = "<basic auth user>"
+webhook_password = "<basic auth password>"
+

+
+

⚠ Ensure that the webhook endpoint is only accessible over HTTPS to mitigate the risk of credential interception when using basic authentication.

+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/installation/bitbucket/index.html b/installation/bitbucket/index.html new file mode 100644 index 000000000..087314786 --- /dev/null +++ b/installation/bitbucket/index.html @@ -0,0 +1,2257 @@ + + + + + + + + + + + + + + + + + + + + + + + BitBucket - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

BitBucket

+ +

Run as a Bitbucket Pipeline

+

You can use the Bitbucket Pipeline system to run Qodo Merge on every pull request open or update.

+
    +
  1. Add the following file to your repository as bitbucket-pipelines.yml
  2. +
+
pipelines:
+    pull-requests:
+      '**':
+        - step:
+            name: PR Agent Review
+            image: python:3.10
+            services:
+              - docker
+            script:
+              - docker run -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=https://bitbucket.org/$BITBUCKET_WORKSPACE/$BITBUCKET_REPO_SLUG/pull-requests/$BITBUCKET_PR_ID review
+
+
    +
  1. Add the following secure variables to your repository under Repository settings > Pipelines > Repository variables. +OPENAI_API_KEY: <your key> +BITBUCKET_BEARER_TOKEN: <your token>
  2. +
+

You can get a Bitbucket token for your repository by following Repository Settings -> Security -> Access Tokens.

+

Note that comments on a PR are not supported in Bitbucket Pipeline.

+

Run using CodiumAI-hosted Bitbucket app 💎

+

Please visit Qodo Merge Pro if you're interested in a hosted BitBucket app solution that provides full functionality, including PR reviews and comment handling. It's based on the bitbucket_app.py implementation.

+

Bitbucket Server and Data Center

+

Log into your on-prem instance of Bitbucket with your service account username and password. Navigate to Manage account, HTTP Access tokens, Create Token. Generate the token and add it to .secrets.toml under the bitbucket_server section:

+
[bitbucket_server]
+bearer_token = "<your key>"
+
+

Run it as CLI

+

Modify configuration.toml:

+
git_provider="bitbucket_server"
+
+

and pass the Pull request URL: +

python cli.py --pr_url https://git.onpreminstanceofbitbucket.com/projects/PROJECT/repos/REPO/pull-requests/1 review
+

+

Run it as service

+

To run Qodo Merge as a webhook, build the docker image:

docker build . -t codiumai/pr-agent:bitbucket_server_webhook --target bitbucket_server_webhook -f docker/Dockerfile
+docker push codiumai/pr-agent:bitbucket_server_webhook  # Push to your Docker repository
+

+

Navigate to Projects or Repositories, Settings, Webhooks, Create Webhook. Fill in the name and URL, set Authentication to None, and select the Pull Request Opened checkbox to receive that event as a webhook.

+

The URL should end with /webhook, for example: https://domain.com/webhook

+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/installation/github/index.html b/installation/github/index.html new file mode 100644 index 000000000..4fc523e3f --- /dev/null +++ b/installation/github/index.html @@ -0,0 +1,2470 @@ + + + + + + + + + + + + + + + + + + + + + + + GitHub - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

GitHub

+ +

Run as a GitHub Action

+

You can use our pre-built Github Action Docker image to run Qodo Merge as a Github Action.

+

1) Add the following file to your repository under .github/workflows/pr_agent.yml:

+
on:
+  pull_request:
+    types: [opened, reopened, ready_for_review]
+  issue_comment:
+jobs:
+  pr_agent_job:
+    if: ${{ github.event.sender.type != 'Bot' }}
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+      pull-requests: write
+      contents: write
+    name: Run pr agent on every pull request, respond to user comments
+    steps:
+      - name: PR Agent action step
+        id: pragent
+        uses: Codium-ai/pr-agent@main
+        env:
+          OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+

2) Add the following secret to your repository under Settings > Secrets and variables > Actions > New repository secret > Add secret:

+
Name = OPENAI_KEY
+Secret = <your key>
+
+

The GITHUB_TOKEN secret is automatically created by GitHub.

+

3) Merge this change to your main branch. +When you open your next PR, you should see a comment from github-actions bot with a review of your PR, and instructions on how to use the rest of the tools.

+

4) You may configure Qodo Merge by adding environment variables under the env section corresponding to any configurable property in the configuration file. Some examples: +

      env:
+        # ... previous environment values
+        OPENAI.ORG: "<Your organization name under your OpenAI account>"
+        PR_REVIEWER.REQUIRE_TESTS_REVIEW: "false" # Disable tests review
+        PR_CODE_SUGGESTIONS.NUM_CODE_SUGGESTIONS: 6 # Increase number of code suggestions
+
+See detailed usage instructions in the USAGE GUIDE

+

Using a specific release

+
+

If you want to pin your action to a specific release (v0.23 for example) for stability reasons, use:

...
+    steps:
+      - name: PR Agent action step
+        id: pragent
+        uses: docker://codiumai/pr-agent:0.23-github_action
+...
+

+

For enhanced security, you can also specify the Docker image by its digest: +

...
+    steps:
+      - name: PR Agent action step
+        id: pragent
+        uses: docker://codiumai/pr-agent@sha256:14165e525678ace7d9b51cda8652c2d74abb4e1d76b57c4a6ccaeba84663cc64
+...
+

+
+

Action for GitHub enterprise server

+
+

To use the action with a GitHub enterprise server, add an environment variable GITHUB.BASE_URL with the API URL of your GitHub server.

+

For example, if your GitHub server is at https://github.mycompany.com, add the following to your workflow file: +

      env:
+        # ... previous environment values
+        GITHUB.BASE_URL: "https://github.mycompany.com/api/v3"
+

+
+
+

Run as a GitHub App

+

Running Qodo Merge as a GitHub App allows you to automate the review process on your private or public repositories.

+

1) Create a GitHub App from the Github Developer Portal.

+
    +
  • Set the following permissions:
      +
    • Pull requests: Read & write
    • +
    • Issue comment: Read & write
    • +
    • Metadata: Read-only
    • +
    • Contents: Read-only
    • +
    +
  • +
  • Set the following events:
      +
    • Issue comment
    • +
    • Pull request
    • +
    • Push (if you need to enable triggering on PR update)
    • +
    +
  • +
+

2) Generate a random secret for your app, and save it for later. For example, you can use:

+
WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
+
+

3) Acquire the following pieces of information from your app's settings page:

+
    +
  • App private key (click "Generate a private key" and save the file)
  • +
  • App ID
  • +
+

4) Clone this repository:

+
git clone https://github.com/Codium-ai/pr-agent.git
+
+

5) Copy the secrets template file and fill in the following:

+
cp pr_agent/settings/.secrets_template.toml pr_agent/settings/.secrets.toml
+# Edit .secrets.toml file
+
+
    +
  • Your OpenAI key.
  • +
  • Copy your app's private key to the private_key field.
  • +
  • Copy your app's ID to the app_id field.
  • +
  • Copy your app's webhook secret to the webhook_secret field.
  • +
  • +

    Set deployment_type to 'app' in configuration.toml

    +
    +

The .secrets.toml file is not copied to the Docker image by default, and is only used for local development. If you want to use the .secrets.toml file in your Docker image, you can remove it from the .dockerignore file. In most production environments, you would instead inject the secrets as environment variables or as mounted volumes. For example, to inject a secrets file as a volume in a Kubernetes environment, you can update your pod spec to include the following, assuming you have a secret named pr-agent-settings with a key named .secrets.toml:

           volumes:
    +        - name: settings-volume
    +          secret:
    +            secretName: pr-agent-settings
    +// ...
    +       containers:
    +// ...
    +          volumeMounts:
    +            - mountPath: /app/pr_agent/settings_prod
    +              name: settings-volume
    +

    +

    Another option is to set the secrets as environment variables in your deployment environment, for example OPENAI.KEY and GITHUB.USER_TOKEN.

    +
    +
  • +
+

6) Build a Docker image for the app and optionally push it to a Docker repository. We'll use Dockerhub as an example:

+
+docker build . -t codiumai/pr-agent:github_app --target github_app -f docker/Dockerfile
+docker push codiumai/pr-agent:github_app  # Push to your Docker repository
+
+
    +
  1. +

Host the app using a server, serverless function, or container environment. Alternatively, for development and debugging, you may use tools like smee.io to forward webhooks to your local machine. You can also check the Deploy as a Lambda Function section.

    +
  2. +
  3. +

    Go back to your app's settings, and set the following:

    +
  4. +
  5. +

    Webhook URL: The URL of your app's server or the URL of the smee.io channel.

    +
  6. +
  7. +

    Webhook secret: The secret you generated earlier.

    +
  8. +
  9. +

    Install the app by navigating to the "Install App" tab and selecting your desired repositories.

    +
  10. +
+
+

Note: When running Qodo Merge from a GitHub App, the default configuration file (configuration.toml) will be loaded. However, you can override the default tool parameters by adding a local configuration file, .pr_agent.toml, to the root of your repo. For more information, please check out the USAGE GUIDE.
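As an illustration, a repo-level .pr_agent.toml could override a couple of the options shown in the GitHub Action example above (the option names come from that example; the values here are arbitrary):

[pr_reviewer]
require_tests_review = false

[pr_code_suggestions]
num_code_suggestions = 6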

+
+
+

Deploy as a Lambda Function

+

Note that since AWS Lambda env vars cannot have "." in the name, you can replace each "." in an env variable with "__".
+For example: GITHUB.WEBHOOK_SECRET --> GITHUB__WEBHOOK_SECRET

+
    +
  1. Follow steps 1-5 from here.
  2. +
  3. Build a docker image that can be used as a lambda function: docker buildx build --platform=linux/amd64 . -t codiumai/pr-agent:serverless -f docker/Dockerfile.lambda
  4. +
  5. Push image to ECR +
    docker tag codiumai/pr-agent:serverless <AWS_ACCOUNT>.dkr.ecr.<AWS_REGION>.amazonaws.com/codiumai/pr-agent:serverless
    +docker push <AWS_ACCOUNT>.dkr.ecr.<AWS_REGION>.amazonaws.com/codiumai/pr-agent:serverless
    +
  6. +
  7. Create a lambda function that uses the uploaded image. Set the lambda timeout to be at least 3m.
  8. +
  9. Configure the lambda function to have a Function URL.
  10. +
  11. In the environment variables of the Lambda function, specify AZURE_DEVOPS_CACHE_DIR to a writable location such as /tmp. (see link)
  12. +
  13. Go back to steps 8-9 of Method 5 with the function url as your Webhook URL. + The Webhook URL would look like https://<LAMBDA_FUNCTION_URL>/api/v1/github_webhooks
  14. +
+
+

AWS CodeCommit Setup

+

Not all features are supported for CodeCommit yet. At the moment, CodeCommit support is limited to running the Qodo Merge CLI on the command line, using AWS credentials stored in environment variables (more features will be added in the future). The following instructions show how to have Qodo Merge review a CodeCommit pull request from the command line:

+
    +
  1. Create an IAM user that you will use to read CodeCommit pull requests and post comments
      +
    • Note: That user should have CLI access only, not Console access
    • +
    +
  2. +
  3. Add IAM permissions to that user, to allow access to CodeCommit (see IAM Role example below)
  4. +
  5. Generate an Access Key for your IAM user
  6. +
  7. Set the Access Key and Secret using environment variables (see Access Key example below)
  8. +
  9. Set the git_provider value to codecommit in the pr_agent/settings/configuration.toml settings file
  10. +
  11. Set the PYTHONPATH to include your pr-agent project directory
      +
    • Option A: Add PYTHONPATH="/PATH/TO/PROJECTS/pr-agent" to your .env file
    • +
    • Option B: Set PYTHONPATH and run the CLI in one command, for example:
        +
      • PYTHONPATH="/PATH/TO/PROJECTS/pr-agent" python pr_agent/cli.py [--ARGS]
      • +
      +
    • +
    +
  12. +
+
+

AWS CodeCommit IAM Role Example

+

Example IAM permissions to that user to allow access to CodeCommit:

+
    +
  • Note: The following is a working example of IAM permissions that has read access to the repositories and write access to allow posting comments
  • +
  • Note: If you only want pr-agent to review your pull requests, you can tighten the IAM permissions further; however, this IAM example will work and allows pr-agent to post comments to the PR
  • +
  • Note: You may want to replace the "Resource": "*" with your list of repos, to limit access to only those repos
  • +
+
{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "codecommit:BatchDescribe*",
+                "codecommit:BatchGet*",
+                "codecommit:Describe*",
+                "codecommit:EvaluatePullRequestApprovalRules",
+                "codecommit:Get*",
+                "codecommit:List*",
+                "codecommit:PostComment*",
+                "codecommit:PutCommentReaction",
+                "codecommit:UpdatePullRequestDescription",
+                "codecommit:UpdatePullRequestTitle"
+            ],
+            "Resource": "*"
+        }
+    ]
+}
+
+

AWS CodeCommit Access Key and Secret

+

Example setting the Access Key and Secret using environment variables

+
export AWS_ACCESS_KEY_ID="XXXXXXXXXXXXXXXX"
+export AWS_SECRET_ACCESS_KEY="XXXXXXXXXXXXXXXX"
+export AWS_DEFAULT_REGION="us-east-1"
+
+

AWS CodeCommit CLI Example

+

After you set up AWS CodeCommit using the instructions above, here is an example CLI run that tells pr-agent to review a given pull request. +(Replace your specific PYTHONPATH and PR URL in the example)

+
PYTHONPATH="/PATH/TO/PROJECTS/pr-agent" python pr_agent/cli.py \
+  --pr_url https://us-east-1.console.aws.amazon.com/codesuite/codecommit/repositories/MY_REPO_NAME/pull-requests/321 \
+  review
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/installation/gitlab/index.html b/installation/gitlab/index.html new file mode 100644 index 000000000..ecd84fd3f --- /dev/null +++ b/installation/gitlab/index.html @@ -0,0 +1,2211 @@ + + + + + + + + + + + + + + + + + + + + + + + GitLab - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

GitLab

+ +

Run as a GitLab Pipeline

+

You can use a pre-built Action Docker image to run Qodo Merge as a GitLab pipeline. This is a simple way to get started with Qodo Merge without setting up your own server.

+

(1) Add the following file to your repository under .gitlab-ci.yml: +

stages:
+  - pr_agent
+
+pr_agent_job:
+  stage: pr_agent
+  image:
+    name: codiumai/pr-agent:latest
+    entrypoint: [""]
+  script:
+    - cd /app
+    - echo "Running PR Agent action step"
+    - export MR_URL="$CI_MERGE_REQUEST_PROJECT_URL/merge_requests/$CI_MERGE_REQUEST_IID"
+    - echo "MR_URL=$MR_URL"
+    - export gitlab__url=$CI_SERVER_PROTOCOL://$CI_SERVER_FQDN
+    - export gitlab__PERSONAL_ACCESS_TOKEN=$GITLAB_PERSONAL_ACCESS_TOKEN
+    - export config__git_provider="gitlab"
+    - export openai__key=$OPENAI_KEY
+    - python -m pr_agent.cli --pr_url="$MR_URL" describe
+    - python -m pr_agent.cli --pr_url="$MR_URL" review
+    - python -m pr_agent.cli --pr_url="$MR_URL" improve
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+
+This script will run Qodo Merge on every new merge request. You can modify the rules section to run Qodo Merge on different events. +You can also modify the script section to run different Qodo Merge commands, or with different parameters by exporting different environment variables.

+

(2) Add the following masked variables to your GitLab repository (CI/CD -> Variables):

+
    +
  • +

    GITLAB_PERSONAL_ACCESS_TOKEN: Your GitLab personal access token.

    +
  • +
  • +

    OPENAI_KEY: Your OpenAI key.

    +
  • +
+

Note that if your base branches are not protected, don't set the variables as protected, since the pipeline will not have access to them.

+

Run a GitLab webhook server

+
    +
  1. +

    From the GitLab workspace or group, create an access token. Enable the "api" scope only.

    +
  2. +
  3. +

    Generate a random secret for your app, and save it for later. For example, you can use:

    +
  4. +
+

WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
+
3. Follow the instructions to build the Docker image, set up a secrets file, and deploy on your own server, following steps 4-7 from here.

+
    +
  1. +

    In the secrets file, fill in the following:

    +
      +
    • Your OpenAI key.
    • +
    • In the [gitlab] section, fill in personal_access_token and shared_secret. The access token can be a personal access token, or a group or project access token.
    • +
    • Set deployment_type to 'gitlab' in configuration.toml
    • +
    +
  2. +
  3. +

    Create a webhook in GitLab. Set the URL to http[s]://<PR_AGENT_HOSTNAME>/webhook. Set the secret token to the generated secret from step 2. +In the "Trigger" section, check the ‘comments’ and ‘merge request events’ boxes.

    +
  4. +
  5. +

Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.

    +
  6. +
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/installation/index.html b/installation/index.html new file mode 100644 index 000000000..094af0e15 --- /dev/null +++ b/installation/index.html @@ -0,0 +1,2108 @@ + + + + + + + + + + + + + + + + + + + + + + + Installation - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Installation

+

Self-hosted Qodo Merge

+

If you choose to host your own Qodo Merge, you first need to acquire two tokens:

+
    +
  1. An OpenAI key from here, with access to GPT-4 (or a key for other language models, if you prefer).
  2. +
  3. A GitHub\GitLab\BitBucket personal access token (classic), with the repo scope. [GitHub from here]
  4. +
+

There are several ways to use self-hosted Qodo Merge:

+ +

Qodo Merge Pro 💎

+

Qodo Merge Pro, an app hosted by CodiumAI for GitHub\GitLab\BitBucket, is also available. +
+With Qodo Merge Pro, installation is as simple as signing up and adding the Qodo Merge app to your relevant repo. +See here for more details.

+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/installation/locally/index.html b/installation/locally/index.html new file mode 100644 index 000000000..674819e44 --- /dev/null +++ b/installation/locally/index.html @@ -0,0 +1,2255 @@ + + + + + + + + + + + + + + + + + + + + + + + Locally - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Locally

+ +

Using pip package

+

Install the package:

+
pip install pr-agent
+
+

Then run the relevant tool with the script below. +
+Make sure to fill in the required parameters (user_token, openai_key, pr_url, command):

+
from pr_agent import cli
+from pr_agent.config_loader import get_settings
+
+def main():
+    # Fill in the following values
+    provider = "github" # github/gitlab/bitbucket/azure_devops
+    user_token = "..."  #  user token
+    openai_key = "..."  # OpenAI key
+    pr_url = "..."      # PR URL, for example 'https://github.com/Codium-ai/pr-agent/pull/809'
+    command = "/review" # Command to run (e.g. '/review', '/describe', '/ask="What is the purpose of this PR?"', ...)
+
+    # Setting the configurations
+    get_settings().set("CONFIG.git_provider", provider)
+    get_settings().set("openai.key", openai_key)
+    get_settings().set("github.user_token", user_token)
+
+    # Run the command. Feedback will appear in GitHub PR comments
+    cli.run_command(pr_url, command)
+
+
+if __name__ == '__main__':
+    main()
+
+

Using Docker image

+

A list of the relevant tools can be found in the tools guide.

+

To invoke a tool (for example review), you can run directly from the Docker image. Here's how:

+
    +
  • +

    For GitHub: +

    docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
    +
    If you are using GitHub Enterprise Server, you need to specify the custom URL as a variable.
    For example, if your GitHub server is at https://github.mycompany.com, add the following to the command:
    -e GITHUB.BASE_URL=https://github.mycompany.com/api/v3
    +

    +
  • +
  • +

    For GitLab: +

    docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
    +

    +

    If you have a dedicated GitLab instance, you need to specify the custom URL as a variable:

    -e GITLAB.URL=<your gitlab instance url> 
    +

    +
  • +
  • +

    For BitBucket: +

    docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review
    +

    +
  • +
+

For other git providers, update CONFIG.GIT_PROVIDER accordingly, and check the pr_agent/settings/.secrets_template.toml file for the expected names and values of the environment variables.

+
+

Run from source

+
    +
  1. Clone this repository:
  2. +
+
git clone https://github.com/Codium-ai/pr-agent.git
+
+
    +
  1. Navigate to the /pr-agent folder and install the requirements in your favorite virtual environment:
  2. +
+
pip install -e .
+
+

Note: If you get an error related to Rust during dependency installation, make sure Rust is installed and in your PATH (instructions: https://rustup.rs).

+
    +
  1. Copy the secrets template file and fill in your OpenAI key and your GitHub user token:
  2. +
+
cp pr_agent/settings/.secrets_template.toml pr_agent/settings/.secrets.toml
+chmod 600 pr_agent/settings/.secrets.toml
+# Edit .secrets.toml file
+
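The secrets file follows the structure of .secrets_template.toml. A minimal sketch of the relevant entries, with placeholder values, might look like:

[openai]
key = "sk-..."          # your OpenAI API key

[github]
user_token = "ghp_..."  # your GitHub personal access token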
+
    +
  1. Run the cli.py script:
  2. +
+
python3 -m pr_agent.cli --pr_url <pr_url> review
+python3 -m pr_agent.cli --pr_url <pr_url> ask <your question>
+python3 -m pr_agent.cli --pr_url <pr_url> describe
+python3 -m pr_agent.cli --pr_url <pr_url> improve
+python3 -m pr_agent.cli --pr_url <pr_url> add_docs
+python3 -m pr_agent.cli --pr_url <pr_url> generate_labels
+python3 -m pr_agent.cli --issue_url <issue_url> similar_issue
+...
+
+

[Optional] Add the pr_agent folder to your PYTHONPATH +

export PYTHONPATH=$PYTHONPATH:<PATH to pr_agent folder>
+

+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/installation/pr_agent_pro/index.html b/installation/pr_agent_pro/index.html new file mode 100644 index 000000000..40ff0fe77 --- /dev/null +++ b/installation/pr_agent_pro/index.html @@ -0,0 +1,2287 @@ + + + + + + + + + + + + + + + + + + + + + + + 💎 Qodo Merge Pro - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

💎 Qodo Merge Pro

+ +

Getting Started with Qodo Merge Pro

+

Qodo Merge Pro is a versatile application compatible with GitHub, GitLab, and BitBucket, hosted by CodiumAI. +See here for more details about the benefits of using Qodo Merge Pro.

+

Interested parties can subscribe to Qodo Merge Pro through the following link. +After subscribing, you are granted the ability to easily install the application across any of your repositories.

+

Qodo Merge Pro

+

Each user who wants to use Qodo Merge Pro needs to buy a seat. Initially, CodiumAI offers a two-week trial period at no cost, after which continued access requires each user to secure a personal seat. Once a user acquires a seat, they can use Qodo Merge Pro across any repository where it was enabled.

+

Users without a purchased seat who interact with a repository featuring Qodo Merge Pro are entitled to up to five complimentary responses. Beyond this limit, Qodo Merge Pro will stop responding to their inquiries unless a seat is purchased.

+

Install Qodo Merge Pro for GitHub Enterprise Server

+

To use Qodo Merge Pro application on your private GitHub Enterprise Server, you will need to contact us for starting an Enterprise trial.

+

Install Qodo Merge Pro for GitLab (Teams & Enterprise)

+

Since the GitLab platform does not support apps, installing Qodo Merge Pro for GitLab is a bit more involved and requires the following steps:

+

Step 1

+

Acquire a personal, project or group level access token. Enable the “api” scope in order to allow Qodo Merge to read pull requests, comment and respond to requests.

+
+

Step 1

+
+

Store the token in a safe place; you won't be able to access it again after it has been generated.

+

Step 2

+

Generate a shared secret and link it to the access token. Browse to https://register.gitlab.pr-agent.codium.ai. +Fill in your generated GitLab token and your company or personal name in the appropriate fields and click "Submit".

+

You should see "Success!" displayed above the Submit button, and a shared secret will be generated. Store it in a safe place; you won't be able to access it again after it has been generated.

+

Step 3

+

Install a webhook for your repository or groups, by clicking “webhooks” on the settings menu. Click the “Add new webhook” button.

+
+

Step 3.1

+
+

In the webhook definition form, fill in the following fields: +URL: https://pro.gitlab.pr-agent.codium.ai/webhook

+

Secret token: Your CodiumAI key +Trigger: Check the ‘comments’ and ‘merge request events’ boxes. +Enable SSL verification: Check the box.

+
+

Step 3.2

+
+

Step 4

+

You’re all set!

+

Open a new merge request or add a MR comment with one of Qodo Merge’s commands such as /review, /describe or /improve.

+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/overview/data_privacy/index.html b/overview/data_privacy/index.html new file mode 100644 index 000000000..ec91e17af --- /dev/null +++ b/overview/data_privacy/index.html @@ -0,0 +1,2182 @@ + + + + + + + + + + + + + + + + + + + + + + + Data Privacy - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Data Privacy

+ +

Self-hosted Qodo Merge

+
    +
  • If you self-host Qodo Merge with your OpenAI (or other LLM provider) API key, it is between you and the provider. We don't send your code data to Qodo Merge servers.
  • +
+

Qodo Merge Pro 💎

+
    +
  • +

    When using Qodo Merge Pro 💎, hosted by CodiumAI, we will not store any of your data, nor will we use it for training. You will also benefit from an OpenAI account with zero data retention.

    +
  • +
  • +

    For certain clients, CodiumAI-hosted Qodo Merge Pro will use CodiumAI’s proprietary models. If this is the case, you will be notified.

    +
  • +
  • +

    No passive collection of Code and Pull Requests’ data — Qodo Merge will be active only when you invoke it, and it will then extract and analyze only data relevant to the executed command and queried pull request.

    +
  • +
+

Qodo Merge Chrome extension

+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/overview/index.html b/overview/index.html new file mode 100644 index 000000000..a8d802721 --- /dev/null +++ b/overview/index.html @@ -0,0 +1,2374 @@ + + + + + + + + + + + + + + + + + + + Overview - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Overview

+

Qodo Merge is an open-source tool to help efficiently review and handle pull requests.

+
    +
  • +

    See the Installation Guide for instructions on installing and running the tool on different git platforms.

    +
  • +
  • +

    See the Usage Guide for instructions on running the Qodo Merge commands via different interfaces, including CLI, online usage, or by automatically triggering them when a new PR is opened.

    +
  • +
  • +

    See the Tools Guide for a detailed description of the different tools.

    +
  • +
+ +

To search the documentation site using natural language:

+

1) Comment /help "your question" in either:

+
    +
  • A pull request where Qodo Merge is installed
  • +
  • A PR Chat
  • +
+

2) Qodo Merge will respond with an answer that includes relevant documentation links.

+

Qodo Merge Features

+

Qodo Merge offers extensive pull request functionalities across various git providers.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
GitHubGitlabBitbucketAzure DevOps
TOOLSReview
⮑ Incremental
Ask
Describe
Inline file summary 💎
Improve
⮑ Extended
Custom Prompt 💎
Reflect and Review
Update CHANGELOG.md
Find Similar Issue
Add PR Documentation 💎
Generate Custom Labels 💎
Analyze PR Components 💎
USAGECLI
App / webhook
Actions
COREPR compression
Repo language prioritization
Adaptive and token-aware file patch fitting
Multiple models support
Incremental PR review
Static code analysis 💎
Multiple configuration options 💎
+

💎 marks a feature available only in Qodo Merge Pro

+

Example Results

+
+ +

/describe

+
+

/describe

+
+
+ +

/review

+
+

/review

+
+
+ +

/improve

+
+

/improve

+
+
+ +

/generate_labels

+
+

/generate_labels

+
+
+ +

How it Works

+

The following diagram illustrates Qodo Merge tools and their flow:

+

Qodo Merge Tools

+

Check out the PR Compression strategy page for more details on how we convert a code diff to a manageable LLM prompt

+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/overview/pr_agent_pro/index.html b/overview/pr_agent_pro/index.html new file mode 100644 index 000000000..2a20b8a56 --- /dev/null +++ b/overview/pr_agent_pro/index.html @@ -0,0 +1,2288 @@ + + + + + + + + + + + + + + + + + + + + + + + 💎 Qodo Merge Pro - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

💎 Qodo Merge Pro

+ +

Overview

+

Qodo Merge Pro is a hosted version of Qodo Merge, provided by Qodo. A complimentary two-week trial is offered, followed by a monthly subscription fee. +Qodo Merge Pro is designed for companies and teams that require additional features and capabilities. It provides the following benefits:

+
    +
  1. +

    Fully managed - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the Qodo Merge app to your GitHub\GitLab\BitBucket repo.

    +
  2. +
  3. +

    Improved privacy - No data will be stored or used to train models. Qodo Merge Pro employs zero data retention, and uses OpenAI and Claude accounts with zero data retention.

    +
  4. +
  5. +

    Improved support - Qodo Merge Pro users will receive priority support, and will be able to request new features and capabilities.

    +
  6. +
  7. +

    Supporting self-hosted git servers - Qodo Merge Pro can be installed on GitHub Enterprise Server, GitLab, and BitBucket. For more information, see the installation guide.

    +
  8. +
  9. +

    PR Chat - Qodo Merge Pro allows you to engage in private chat about your pull requests on private repositories.

    +
  10. +
+

Additional features

+

Here are some of the additional features and capabilities that Qodo Merge Pro offers:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Feature | Description
Model selection | Choose the model that best fits your needs, among top models like GPT4 and Claude-Sonnet-3.5
Global and wiki configuration | Control configurations for many repositories from a single location; edit the configuration of a single repo without committing code
Apply suggestions | Generate committable code from the relevant suggestions interactively by clicking on a checkbox
Suggestions impact | Automatically mark suggestions that were implemented by the user (either directly in GitHub, or indirectly in the IDE) to enable tracking of the impact of the suggestions
CI feedback | Automatically analyze failed CI checks on GitHub and provide actionable feedback in the PR conversation, helping to resolve issues quickly
Advanced usage statistics | Qodo Merge Pro offers detailed statistics at user, repository, and company levels, including metrics about Qodo Merge usage, and also general statistics and insights
Incorporating companies' best practices | Use the companies' best practices as reference to increase the effectiveness and the relevance of the code suggestions
Interactive triggering | Interactively apply different tools via the analyze command
Custom labels | Define custom labels for Qodo Merge to assign to the PR
+

Additional tools

+

Here are additional tools that are available only for Qodo Merge Pro users:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Feature | Description
Custom Prompt Suggestions | Generate code suggestions based on custom prompts from the user
Analyze PR components | Identify the components that changed in the PR, and enable interactively applying different tools to them
Tests | Generate tests for code components that changed in the PR
PR documentation | Generate docstrings for code components that changed in the PR
Improve Component | Generate code suggestions for code components that changed in the PR
Similar code search | Search for similar code in the repository, organization, or entire GitHub
+

Supported languages


Qodo Merge Pro leverages the world's leading code models - Claude 3.5 Sonnet and GPT-4. As a result, its primary tools such as describe, review, and improve, as well as the PR-chat feature, support virtually all programming languages.


For specialized commands that require static code analysis, Qodo Merge Pro offers support for specific languages. For more details about features that require static code analysis, please refer to the documentation.

+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 000000000..e502d4ae0 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":"

Qodo Merge is an open-source tool to help efficiently review and handle pull requests.

  • See the Installation Guide for instructions on installing and running the tool on different git platforms.

  • See the Usage Guide for instructions on running the Qodo Merge commands via different interfaces, including CLI, online usage, or by automatically triggering them when a new PR is opened.

  • See the Tools Guide for a detailed description of the different tools.

"},{"location":"#qodo-merge-docs-smart-search","title":"Qodo Merge Docs Smart Search","text":"

To search the documentation site using natural language:

1) Comment /help \"your question\" in either:

  • A pull request where Qodo Merge is installed
  • A PR Chat

2) Qodo Merge will respond with an answer that includes relevant documentation links.

"},{"location":"#qodo-merge-features","title":"Qodo Merge Features","text":"

Qodo Merge offers extensive pull request functionalities across various git providers.

GitHub Gitlab Bitbucket Azure DevOps TOOLS Review \u2705 \u2705 \u2705 \u2705 \u2b91 Incremental \u2705 Ask \u2705 \u2705 \u2705 \u2705 Describe \u2705 \u2705 \u2705 \u2705 \u2b91 Inline file summary \ud83d\udc8e \u2705 \u2705 Improve \u2705 \u2705 \u2705 \u2705 \u2b91 Extended \u2705 \u2705 \u2705 \u2705 Custom Prompt \ud83d\udc8e \u2705 \u2705 \u2705 Reflect and Review \u2705 \u2705 \u2705 Update CHANGELOG.md \u2705 \u2705 \u2705 \ufe0f Find Similar Issue \u2705 \ufe0f Add PR Documentation \ud83d\udc8e \u2705 \u2705 Generate Custom Labels \ud83d\udc8e \u2705 \u2705 Analyze PR Components \ud83d\udc8e \u2705 \u2705 \ufe0f USAGE CLI \u2705 \u2705 \u2705 \u2705 App / webhook \u2705 \u2705 \u2705 \u2705 Actions \u2705 \ufe0f CORE PR compression \u2705 \u2705 \u2705 \u2705 Repo language prioritization \u2705 \u2705 \u2705 \u2705 Adaptive and token-aware file patch fitting \u2705 \u2705 \u2705 \u2705 Multiple models support \u2705 \u2705 \u2705 \u2705 Incremental PR review \u2705 Static code analysis \ud83d\udc8e \u2705 \u2705 \u2705 Multiple configuration options \ud83d\udc8e \u2705 \u2705 \u2705

\ud83d\udc8e marks a feature available only in Qodo Merge Pro

"},{"location":"#example-results","title":"Example Results","text":""},{"location":"#describe","title":"/describe","text":""},{"location":"#review","title":"/review","text":""},{"location":"#improve","title":"/improve","text":""},{"location":"#generate_labels","title":"/generate_labels","text":""},{"location":"#how-it-works","title":"How it Works","text":"

The following diagram illustrates Qodo Merge tools and their flow:

Check out the core abilities page for a comprehensive overview of the variety of core abilities used by Qodo Merge.

"},{"location":"chrome-extension/","title":"Qodo Merge Chrome Extension","text":"

Qodo Merge Chrome extension is a collection of tools that integrates seamlessly with your GitHub environment, aiming to enhance your Git usage experience, and providing AI-powered capabilities to your PRs.

With a single-click installation you will gain access to a context-aware chat on your pull request code, a toolbar extension with multiple AI feedback options, Qodo Merge filters, and additional abilities.

The extension is powered by top code models like Claude 3.5 Sonnet and GPT-4. All the extension's features are free to use on public repositories.

For private repositories, you will need to install Qodo Merge Pro in addition to the extension (Quick GitHub app setup with a 14-day free trial. No credit card needed). For a demonstration of how to install Qodo Merge Pro and use it with the Chrome extension, please refer to the tutorial video at the provided link.

"},{"location":"chrome-extension/#supported-browsers","title":"Supported browsers","text":"

The extension is supported on all Chromium-based browsers, including Google Chrome, Arc, Opera, Brave, and Microsoft Edge.

"},{"location":"chrome-extension/data_privacy/","title":"Data Privacy","text":"

We take your code's security and privacy seriously:

  • The Chrome extension will not send your code to any external servers.
  • For private repositories, we will first validate the user's identity and permissions. After authentication, we generate responses using the existing Qodo Merge Pro integration.
"},{"location":"chrome-extension/features/","title":"Features","text":""},{"location":"chrome-extension/features/#pr-chat","title":"PR chat","text":"

The PR-Chat feature allows you to freely chat with your PR code, within your GitHub environment. It will seamlessly use the PR as context for your chat session, and provide AI-powered feedback.

To enable private chat, simply install the Qodo Merge Chrome extension. After installation, each PR's file-changed tab will include a chat box, where you may ask questions about your code. This chat session is private, and won't be visible to other users.

All open-source repositories are supported. For private repositories, you will also need to install Qodo Merge Pro. After installation, make sure to open at least one new PR to fully register your organization. Once done, you can chat with both new and existing PRs across all installed repositories.

"},{"location":"chrome-extension/features/#context-aware-pr-chat","title":"Context-aware PR chat","text":"

Qodo Merge constructs a comprehensive context for each pull request, incorporating the PR description, commit messages, and code changes with extended dynamic context. This contextual information, along with additional PR-related data, forms the foundation for an AI-powered chat session. The agent then leverages this rich context to provide intelligent, tailored responses to user inquiries about the pull request.

"},{"location":"chrome-extension/features/#toolbar-extension","title":"Toolbar extension","text":"

With Qodo Merge Chrome extension, it's easier than ever to interactively configure and experiment with the different tools and configuration options.

For private repositories, after you find the setup that works for you, you can also easily export it as a persistent configuration file, and use it for automatic commands.

"},{"location":"chrome-extension/features/#qodo-merge-filters","title":"Qodo Merge filters","text":"

Qodo Merge filters is a sidepanel option that allows you to filter different messages in the conversation tab.

For example, you can choose to present only messages from Qodo Merge, or filter those messages out, focusing only on users' comments.

"},{"location":"chrome-extension/features/#enhanced-code-suggestions","title":"Enhanced code suggestions","text":"

Qodo Merge Chrome extension adds the following capabilities to the code suggestions tool's comments:

  • Auto-expand the table when you are viewing a code block, to avoid clipping.
  • Adding a \"quote-and-reply\" button, that enables to address and comment on a specific suggestion (for example, asking the author to fix the issue)

"},{"location":"core-abilities/","title":"Core Abilities","text":"

Qodo Merge utilizes a variety of core abilities to provide a comprehensive and efficient code review experience. These abilities include:

  • Local and global metadata
  • Dynamic context
  • Self-reflection
  • Impact evaluation
  • Interactivity
  • Compression strategy
  • Code-oriented YAML
  • Static code analysis
  • Code fine-tuning benchmark
"},{"location":"core-abilities/#blogs","title":"Blogs","text":"

Here are some additional technical blogs from Qodo, that delve deeper into the core capabilities and features of Large Language Models (LLMs) when applied to coding tasks. These resources provide more comprehensive insights into leveraging LLMs for software development.

"},{"location":"core-abilities/#code-generation-and-llms","title":"Code Generation and LLMs","text":"
  • State-of-the-art Code Generation with AlphaCodium \u2013 From Prompt Engineering to Flow Engineering
  • RAG for a Codebase with 10k Repos
"},{"location":"core-abilities/#development-processes","title":"Development Processes","text":"
  • Understanding the Challenges and Pain Points of the Pull Request Cycle
  • Introduction to Code Coverage Testing
"},{"location":"core-abilities/#cost-optimization","title":"Cost Optimization","text":"
  • Reduce Your Costs by 30% When Using GPT for Python Code
"},{"location":"core-abilities/code_oriented_yaml/","title":"Code-oriented YAML","text":""},{"location":"core-abilities/code_oriented_yaml/#overview","title":"Overview","text":"

TBD

"},{"location":"core-abilities/compression_strategy/","title":"Compression strategy","text":""},{"location":"core-abilities/compression_strategy/#overview-pr-compression-strategy","title":"Overview - PR Compression Strategy","text":"

There are two scenarios:

  1. The PR is small enough to fit in a single prompt (including system and user prompt)
  2. The PR is too large to fit in a single prompt (including system and user prompt)

For both scenarios, we first use the following strategy

"},{"location":"core-abilities/compression_strategy/#repo-language-prioritization-strategy","title":"Repo language prioritization strategy","text":"

We prioritize the languages of the repo based on the following criteria:

  1. Exclude binary files and non-code files (e.g. images, PDFs, etc.)
  2. Identify the main languages used in the repo
  3. Sort the PR files by the most common languages in the repo (in descending order), e.g. [[file.py, file2.py], [file3.js, file4.jsx], [readme.md]] (see the sketch after this list)
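For illustration, here is a minimal Python sketch of this ordering step (not Qodo Merge's actual implementation; the extension-to-language mapping and file names are invented, and for brevity the languages are ranked by their frequency within the PR rather than across the whole repo):

from collections import Counter, defaultdict

# Assumed, simplified extension-to-language mapping (illustration only).
EXT_TO_LANG = {'.py': 'Python', '.js': 'JavaScript', '.jsx': 'JavaScript', '.md': 'Other'}

def prioritize_pr_files(pr_files):
    # Group PR files by language, then order the groups by how common each language is.
    groups = defaultdict(list)
    for name in pr_files:
        ext = '.' + name.rsplit('.', 1)[-1] if '.' in name else ''
        groups[EXT_TO_LANG.get(ext, 'Other')].append(name)
    counts = Counter({lang: len(files) for lang, files in groups.items()})
    return [groups[lang] for lang, _ in counts.most_common()]

print(prioritize_pr_files(['file.py', 'file2.py', 'file3.js', 'file4.jsx', 'readme.md']))
# e.g. [['file.py', 'file2.py'], ['file3.js', 'file4.jsx'], ['readme.md']]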
"},{"location":"core-abilities/compression_strategy/#small-pr","title":"Small PR","text":"

In this case, we can fit the entire PR in a single prompt:
  1. Exclude binary files and non-code files (e.g. images, PDFs, etc.)
  2. Expand the surrounding context of each patch to 3 lines above and below the patch

"},{"location":"core-abilities/compression_strategy/#large-pr","title":"Large PR","text":""},{"location":"core-abilities/compression_strategy/#motivation","title":"Motivation","text":"

Pull Requests can be very long and contain a lot of information with varying degrees of relevance to the pr-agent. We want to be able to pack as much information as possible in a single LLM prompt, while keeping the information relevant to the pr-agent.

"},{"location":"core-abilities/compression_strategy/#compression-strategy","title":"Compression strategy","text":"

We prioritize additions over deletions (see the sketch below):
  • Combine all deleted files into a single list (deleted files)
  • File patches are a list of hunks; remove all deletion-only hunks from each file patch
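A minimal sketch of this pruning step, assuming a simplified in-memory representation of file patches (the data structures and file names are illustrative, not Qodo Merge's real ones):

def compress_patches(file_patches):
    # file_patches: {filename: {'deleted': bool, 'hunks': [list of hunk-line lists]}}
    deleted_files, kept = [], {}
    for name, patch in file_patches.items():
        if patch.get('deleted'):
            deleted_files.append(name)   # deleted files are collapsed into a single name list
            continue
        hunks = [h for h in patch['hunks']
                 if any(line.startswith('+') for line in h)]  # drop deletion-only hunks
        kept[name] = hunks
    return kept, deleted_files

kept, deleted = compress_patches({
    'a.py': {'hunks': [['-old line'], ['-old', '+new']]},
    'b.py': {'deleted': True, 'hunks': []},
})
print(kept)     # {'a.py': [['-old', '+new']]}
print(deleted)  # ['b.py']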

"},{"location":"core-abilities/compression_strategy/#adaptive-and-token-aware-file-patch-fitting","title":"Adaptive and token-aware file patch fitting","text":"

We use tiktoken to tokenize the patches after the modifications described above, and we use the following strategy to fit the patches into the prompt (a short sketch follows the list):

  1. Within each language we sort the files by the number of tokens in the file (in descending order):
    • [[file2.py, file.py],[file4.jsx, file3.js],[readme.md]]
  2. Iterate through the patches in the order described above
  3. Add the patches to the prompt until the prompt reaches a certain buffer from the max token length
  4. If there are still patches left, add the remaining patches as a list called other modified files to the prompt until the prompt reaches the max token length (hard stop), skip the rest of the patches.
  5. If we haven't reached the max token length, add the deleted files to the prompt until the prompt reaches the max token length (hard stop), skip the rest of the patches.
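An illustrative sketch of the token-budget loop described above, using tiktoken; the budget numbers, buffer size and patch texts are invented:

import tiktoken

enc = tiktoken.get_encoding('cl100k_base')
MAX_TOKENS, BUFFER = 4000, 500  # assumed budget and safety buffer

def fit_patches(sorted_patches):
    # sorted_patches: [(filename, patch_text)] already ordered by language and token count
    prompt_parts, other_modified, used = [], [], 0
    for name, patch in sorted_patches:
        n = len(enc.encode(patch))
        if used + n <= MAX_TOKENS - BUFFER:
            prompt_parts.append(patch)      # the full patch still fits under the buffer
            used += n
        elif used + len(enc.encode(name)) <= MAX_TOKENS:
            other_modified.append(name)     # only list the file name under other modified files
            used += len(enc.encode(name))
        else:
            break                           # hard stop: skip the rest of the patches
    return prompt_parts, other_modified

parts, others = fit_patches([('big.py', '+' + 'x' * 10), ('small.md', '+doc change')])
print(len(parts), others)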
"},{"location":"core-abilities/compression_strategy/#example","title":"Example","text":""},{"location":"core-abilities/dynamic_context/","title":"Dynamic context","text":""},{"location":"core-abilities/dynamic_context/#tldr","title":"TL;DR","text":"

Qodo Merge uses an asymmetric and dynamic context strategy to improve AI analysis of code changes in pull requests. It provides more context before changes than after, and dynamically adjusts the context based on code structure (e.g., enclosing functions or classes). This approach balances providing sufficient context for accurate analysis, while avoiding needle-in-the-haystack information overload that could degrade AI performance or exceed token limits.

"},{"location":"core-abilities/dynamic_context/#introduction","title":"Introduction","text":"

Pull request code changes are retrieved in a unified diff format, showing three lines of context before and after each modified section, with additions marked by '+' and deletions by '-'.

@@ -12,5 +12,5 @@ def func1():\n code line that already existed in the file...\n code line that already existed in the file...\n code line that already existed in the file....\n-code line that was removed in the PR\n+new code line added in the PR\n code line that already existed in the file...\n code line that already existed in the file...\n code line that already existed in the file...\n\n@@ -26,2 +26,4 @@ def func2():\n...\n

This unified diff format can be challenging for AI models to interpret accurately, as it provides limited context for understanding the full scope of code changes. The presentation of code using '+', '-', and ' ' symbols to indicate additions, deletions, and unchanged lines respectively also differs from the standard code formatting typically used to train AI models.

"},{"location":"core-abilities/dynamic_context/#challenges-of-expanding-the-context-window","title":"Challenges of expanding the context window","text":"

While expanding the context window is technically feasible, it presents a more fundamental trade-off:

Pros:

  • Enhanced context allows the model to better comprehend and localize the code changes, resulting (potentially) in more precise analysis and suggestions. Without enough context, the model may struggle to understand the code changes and provide relevant feedback.

Cons:

  • Excessive context may overwhelm the model with extraneous information, creating a \"needle in a haystack\" scenario where focusing on the relevant details (the code that actually changed) becomes challenging. LLM quality is known to degrade when the context gets larger. Pull requests often encompass multiple changes across many files, potentially spanning hundreds of lines of modified code. This complexity presents a genuine risk of overwhelming the model with excessive context.

  • Increased context expands the token count, increasing processing time and cost, and may prevent the model from processing the entire pull request in a single pass.

"},{"location":"core-abilities/dynamic_context/#asymmetric-and-dynamic-context","title":"Asymmetric and dynamic context","text":"

To address these challenges, Qodo Merge employs an asymmetric and dynamic context strategy, providing the model with more focused and relevant context information for each code change.

Asymmetric:

We start by recognizing that the context preceding a code change is typically more crucial for understanding the modification than the context following it. Consequently, Qodo Merge implements an asymmetric context policy, decoupling the context window into two distinct segments: one for the code before the change and another for the code after.

By independently adjusting each context window, Qodo Merge can supply the model with a more tailored and pertinent context for individual code changes.

Dynamic:

We also employ a \"dynamic\" context strategy. We start by recognizing that the optimal context for a code change often corresponds to its enclosing code component (e.g., function, class), rather than a fixed number of lines. Consequently, we dynamically adjust the context window based on the code's structure, ensuring the model receives the most pertinent information for each modification.

To prevent overwhelming the model with excessive context, we impose a limit on the number of lines searched when identifying the enclosing component. This balance allows for comprehensive understanding while maintaining efficiency and limiting context token usage.
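A simplified Python sketch of the combined asymmetric and dynamic behavior (illustration only; the real logic is language-aware and handles classes, indentation and many more edge cases):

def expand_context(file_lines, hunk_start, hunk_end,
                   lines_before=3, lines_after=1, max_extra_before=8):
    # Return an asymmetric, dynamically extended slice around a hunk (0-based indices).
    start = max(0, hunk_start - lines_before)
    # Dynamic part: walk further up (bounded) looking for an enclosing def/class.
    for i in range(start - 1, max(-1, start - 1 - max_extra_before), -1):
        if file_lines[i].lstrip().startswith(('def ', 'class ')):
            start = i
            break
    end = min(len(file_lines), hunk_end + lines_after)  # less context after the change
    return file_lines[start:end]

src = ['import os', '', 'def func1():', '    a = 1', '    b = 2',
       '    c = 3', '    d = 4', '    e = 5  # changed line']
print(expand_context(src, hunk_start=7, hunk_end=8))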

"},{"location":"core-abilities/dynamic_context/#appendix-relevant-configuration-options","title":"Appendix - relevant configuration options","text":"
[config]\npatch_extension_skip_types =[\".md\",\".txt\"]  # Skip files with these extensions when trying to extend the context\nallow_dynamic_context=true                  # Allow dynamic context extension\nmax_extra_lines_before_dynamic_context = 8  # will try to include up to X extra lines before the hunk in the patch, until we reach an enclosing function or class\npatch_extra_lines_before = 3                # Number of extra lines (+3 default ones) to include before each hunk in the patch\npatch_extra_lines_after = 1                 # Number of extra lines (+3 default ones) to include after each hunk in the patch\n
"},{"location":"core-abilities/impact_evaluation/","title":"Overview - Impact Evaluation \ud83d\udc8e","text":"

Demonstrating the return on investment (ROI) of AI-powered initiatives is crucial for modern organizations. To address this need, Qodo Merge has developed AI impact measurement tools and metrics, providing advanced analytics to help businesses quantify the tangible benefits of AI adoption in their PR review process.

"},{"location":"core-abilities/impact_evaluation/#auto-impact-validator-real-time-tracking-of-implemented-qodo-merge-suggestions","title":"Auto Impact Validator - Real-Time Tracking of Implemented Qodo Merge Suggestions","text":""},{"location":"core-abilities/impact_evaluation/#how-it-works","title":"How It Works","text":"

When a user pushes a new commit to the pull request, Qodo Merge automatically compares the updated code against the previous suggestions, marking them as implemented if the changes address these recommendations, whether directly or indirectly (a naive sketch of the direct case follows the list below):

  1. Direct Implementation: The user directly addresses the suggestion as-is in the PR, either by clicking on the \"apply code suggestion\" checkbox or by making the changes manually.
  2. Indirect Implementation: Qodo Merge recognizes when a suggestion's intent is fulfilled, even if the exact code changes differ from the original recommendation. It marks these suggestions as implemented, acknowledging that users may achieve the same goal through alternative solutions.
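For intuition only, here is a naive sketch of the direct case: checking whether a suggestion's improved code now appears in the updated file, ignoring whitespace. The real validator is considerably more robust, and the indirect case requires semantic, AI-based comparison rather than string matching:

def normalize(code):
    # Ignore whitespace differences when comparing code fragments.
    return ''.join(code.split())

def is_directly_implemented(suggested_code, updated_file):
    return normalize(suggested_code) in normalize(updated_file)

updated = 'def add(a, b):\n    return a + b\n'
print(is_directly_implemented('return a + b', updated))  # True
print(is_directly_implemented('return a - b', updated))  # False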
"},{"location":"core-abilities/impact_evaluation/#real-time-visual-feedback","title":"Real-Time Visual Feedback","text":"

Upon confirming that a suggestion was implemented, Qodo Merge automatically adds a \u2705 (check mark) to the relevant suggestion, enabling transparent tracking of Qodo Merge's impact analysis. Qodo Merge will also add, inside the relevant suggestions, an explanation of how the new code was impacted by each suggestion.

"},{"location":"core-abilities/impact_evaluation/#dashboard-metrics","title":"Dashboard Metrics","text":"

The dashboard provides macro-level insights into the overall impact of Qodo Merge on the pull-request process with key productivity metrics.

By offering clear, data-driven evidence of Qodo Merge's impact, it empowers leadership teams to make informed decisions about the tool's effectiveness and ROI.

Here are key metrics that the dashboard tracks:

"},{"location":"core-abilities/impact_evaluation/#qodo-merge-impacts-per-1k-lines","title":"Qodo Merge Impacts per 1K Lines","text":"

Explanation: for every 1K lines of code (additions/edits), Qodo Merge had on average ~X suggestions implemented.
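In other words, the metric normalizes implemented suggestions by the volume of new code. A toy calculation with invented numbers:

def impacts_per_1k(implemented_suggestions, added_or_edited_lines):
    return implemented_suggestions / added_or_edited_lines * 1000

# e.g. 18 implemented suggestions across PRs that added or edited 12,000 lines:
print(round(impacts_per_1k(18, 12_000), 2))  # 1.5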

Why This Metric Matters:

  1. Standardized and Comparable Measurement: By measuring impacts per 1K lines of code additions, you create a standardized metric that can be compared across different projects, teams, customers, and time periods. This standardization is crucial for meaningful analysis, benchmarking, and identifying where Qodo Merge is most effective.
  2. Accounts for PR Variability and Incentivizes Quality: This metric addresses the fact that \"Not all PRs are created equal.\" By normalizing against lines of code rather than PR count, you account for the variability in PR sizes and focus on the quality and impact of suggestions rather than just the number of PRs affected.
  3. Quantifies Value and ROI: The metric directly correlates with the value Qodo Merge is providing, showing how frequently it offers improvements relative to the amount of new code being written. This provides a clear, quantifiable way to demonstrate Qodo Merge's return on investment to stakeholders.
"},{"location":"core-abilities/impact_evaluation/#suggestion-effectiveness-across-categories","title":"Suggestion Effectiveness Across Categories","text":"

Explanation: This chart illustrates the distribution of implemented suggestions across different categories, enabling teams to better understand Qodo Merge's impact on various aspects of code quality and development practices.

"},{"location":"core-abilities/impact_evaluation/#suggestion-score-distribution","title":"Suggestion Score Distribution","text":"

Explanation: The distribution of the suggestion score for the implemented suggestions, ensuring that higher-scored suggestions truly represent more significant improvements.

"},{"location":"core-abilities/interactivity/","title":"Interactivity","text":""},{"location":"core-abilities/interactivity/#interactive-invocation","title":"Interactive invocation \ud83d\udc8e","text":"

TBD

"},{"location":"core-abilities/metadata/","title":"Local and global metadata","text":""},{"location":"core-abilities/metadata/#local-and-global-metadata-injection-with-multi-stage-analysis","title":"Local and global metadata injection with multi-stage analysis","text":"

(1) Qodo Merge initially retrieves for each PR the following data:

  • PR title and branch name
  • PR original description
  • Commit messages history
  • PR diff patches, in hunk diff format
  • The entire content of the files that were modified in the PR

Tip: Organization-level metadata

In addition to the inputs above, Qodo Merge can incorporate supplementary preferences provided by the user, like extra_instructions and organization best practices. This information can be used to enhance the PR analysis.

(2) By default, the first command that Qodo Merge executes is describe, which generates three types of outputs:

  • PR Type (e.g. bug fix, feature, refactor, etc)
  • PR Description - a bullet point summary of the PR
  • Changes walkthrough - for each modified file, provide a one-line summary followed by a detailed bullet point list of the changes.

These AI-generated outputs are now considered part of the PR metadata, and can be used in subsequent commands like review and improve. This effectively enables multi-stage chain-of-thought analysis, without any additional API calls that would cost time and money.

For example, when generating code suggestions for different files, Qodo Merge can inject the AI-generated \"Changes walkthrough\" file summary in the prompt:

## File: 'src/file1.py'\n### AI-generated file summary:\n- edited function `func1` that does X\n- Removed function `func2` that was not used\n- ....\n\n@@ ... @@ def func1():\n__new hunk__\n11  unchanged code line0 in the PR\n12  unchanged code line1 in the PR\n13 +new code line2 added in the PR\n14  unchanged code line3 in the PR\n__old hunk__\n unchanged code line0\n unchanged code line1\n-old code line2 removed in the PR\n unchanged code line3\n\n@@ ... @@ def func2():\n__new hunk__\n...\n__old hunk__\n...\n
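For illustration, a rough sketch of how a unified-diff hunk could be split into the __new hunk__ / __old hunk__ layout shown above (the line-number bookkeeping is simplified, and this is not Qodo Merge's actual code):

def decouple_hunk(hunk_lines, new_start):
    # hunk_lines: unified-diff body lines, prefixed with '+', '-' or ' '
    new_hunk, old_hunk = [], []
    line_no = new_start
    for line in hunk_lines:
        if line.startswith('+'):
            new_hunk.append(f'{line_no} {line}')
            line_no += 1
        elif line.startswith('-'):
            old_hunk.append(line)        # removed lines only exist on the old side
        else:                            # context line, present on both sides
            new_hunk.append(f'{line_no} {line.lstrip()}')
            old_hunk.append(line)
            line_no += 1
    return '__new hunk__\n' + '\n'.join(new_hunk) + '\n__old hunk__\n' + '\n'.join(old_hunk)

print(decouple_hunk([' unchanged code line0', '-old code line2', '+new code line2'], new_start=11))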

(3) The entire PR files that were retrieved are also used to expand and enhance the PR context (see Dynamic Context).

(4) All the metadata described above represents several levels of cumulative analysis - ranging from hunk level, to file level, to PR level, to organization level. This comprehensive approach enables Qodo Merge AI models to generate more precise and contextually relevant suggestions and feedback.

"},{"location":"core-abilities/self_reflection/","title":"Self-reflection","text":""},{"location":"core-abilities/self_reflection/#tldr","title":"TL;DR","text":"

Qodo Merge implements a self-reflection process where the AI model reflects, scores, and re-ranks its own suggestions, eliminating irrelevant or incorrect ones. This approach improves the quality and relevance of suggestions, saving users time and enhancing their experience. Configuration options allow users to set a score threshold for further filtering out suggestions.

"},{"location":"core-abilities/self_reflection/#introduction-efficient-review-with-hierarchical-presentation","title":"Introduction - Efficient Review with Hierarchical Presentation","text":"

Given that not all generated code suggestions will be relevant, it is crucial to enable users to review them in a fast and efficient way, allowing quick identification and filtering of non-applicable ones.

To achieve this goal, Qodo Merge offers a dedicated hierarchical structure when presenting suggestions to users:

  • A \"category\" section groups suggestions by their category, allowing users to quickly dismiss irrelevant suggestions.
  • Each suggestion is first described by a one-line summary, which can be expanded to a full description by clicking on a collapsible.
  • Upon expanding a suggestion, the user receives a more comprehensive description, and a code snippet demonstrating the recommendation.

Fast Review

This hierarchical structure is designed to facilitate rapid review of each suggestion, with users spending an average of ~5-10 seconds per item.

"},{"location":"core-abilities/self_reflection/#self-reflection-and-re-ranking","title":"Self-reflection and Re-ranking","text":"

The AI model is initially tasked with generating suggestions, and outputting them in order of importance. However, in practice we observe that models often struggle to simultaneously generate high-quality code suggestions and rank them well in a single pass. Furthermore, the initial set of generated suggestions sometimes contains easily identifiable errors.

To address these issues, we implemented a \"self-reflection\" process that refines suggestion ranking and eliminates irrelevant or incorrect proposals. This process consists of the following steps:

  1. Presenting the generated suggestions to the model in a follow-up call.
  2. Instructing the model to score each suggestion on a scale of 0-10 and provide a rationale for the assigned score.
  3. Utilizing these scores to re-rank the suggestions and filter out incorrect ones (with a score of 0).
  4. Optionally, filtering out all suggestions below a user-defined score threshold.

Note that presenting all generated suggestions simultaneously provides the model with a comprehensive context, enabling it to make more informed decisions compared to evaluating each suggestion individually.
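As an illustration of steps 2-4 above, here is a minimal sketch of the re-ranking and filtering applied to the returned scores (the suggestion data is invented):

def rerank_and_filter(suggestions, score_threshold=0):
    # suggestions: [{'summary': str, 'score': int (0-10)}], scored by the self-reflection call
    kept = [s for s in suggestions if s['score'] > 0]            # score 0 means incorrect: drop
    kept = [s for s in kept if s['score'] >= score_threshold]    # optional user-defined threshold
    return sorted(kept, key=lambda s: s['score'], reverse=True)  # re-rank by importance

scored = [{'summary': 'handle empty list', 'score': 8},
          {'summary': 'rename variable', 'score': 3},
          {'summary': 'irrelevant change', 'score': 0}]
for s in rerank_and_filter(scored, score_threshold=4):
    print(s['summary'], s['score'])   # only the highest-scored suggestion remains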

To conclude, the self-reflection process enables Qodo Merge to prioritize suggestions based on their importance, eliminate inaccurate or irrelevant proposals, and optionally exclude suggestions that fall below a specified threshold of significance. This results in a more refined and valuable set of suggestions for the user, saving time and improving the overall experience.

"},{"location":"core-abilities/self_reflection/#example-results","title":"Example Results","text":""},{"location":"core-abilities/self_reflection/#appendix-relevant-configuration-options","title":"Appendix - Relevant Configuration Options","text":"
[pr_code_suggestions]\nself_reflect_on_suggestions = true # Enable self-reflection on code suggestions\nsuggestions_score_threshold = 0 # Filter out suggestions with a score below this threshold (0-10)\n
"},{"location":"core-abilities/static_code_analysis/","title":"Static code analysis","text":""},{"location":"core-abilities/static_code_analysis/#overview-static-code-analysis","title":"Overview - Static Code Analysis \ud83d\udc8e","text":"

By combining static code analysis with LLM capabilities, Qodo Merge can provide a comprehensive analysis of the PR code changes on a component level.

It scans the PR code changes, finds all the code components (methods, functions, classes) that changed, and enables you to interactively generate tests, docs, code suggestions and similar code search for each component.

Languages that are currently supported:

Python, Java, C++, JavaScript, TypeScript, C#.
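To illustrate the component-detection idea for Python, here is a simplified sketch that uses the standard ast module to find the functions and classes overlapping a set of changed lines (Qodo Merge's actual analysis is language-aware and more complete):

import ast

def changed_components(source, changed_lines):
    # Return names of functions/classes whose line span intersects the changed lines.
    hits = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
            span = set(range(node.lineno, (node.end_lineno or node.lineno) + 1))
            if span & changed_lines:
                hits.append(node.name)
    return hits

code = 'def foo():\n    return 1\n\nclass Bar:\n    def baz(self):\n        return 2\n'
print(changed_components(code, {5}))  # ['Bar', 'baz']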

"},{"location":"core-abilities/static_code_analysis/#capabilities","title":"Capabilities","text":""},{"location":"core-abilities/static_code_analysis/#analyze-pr","title":"Analyze PR","text":"

The analyze tool enables you to interactively generate tests, docs, code suggestions and similar code search for each component that changed in the PR. It can be invoked manually by commenting on any PR:

/analyze\n

An example result:

Clicking on each checkbox will trigger the relevant tool for the selected component.

"},{"location":"core-abilities/static_code_analysis/#generate-tests","title":"Generate Tests","text":"

The test tool generates tests for a selected component, based on the PR code changes. It can be invoked manually by commenting on any PR:

/test component_name\n
where 'component_name' is the name of a specific component in the PR. It can also be triggered interactively by using the analyze tool.

"},{"location":"core-abilities/static_code_analysis/#generate-docs-for-a-component","title":"Generate Docs for a Component","text":"

The add_docs tool scans the PR code changes, and automatically generates docstrings for any code components that changed in the PR. It can be invoked manually by commenting on any PR:

/add_docs component_name\n

It can also be triggered interactively by using the analyze tool.

"},{"location":"core-abilities/static_code_analysis/#generate-code-suggestions-for-a-component","title":"Generate Code Suggestions for a Component","text":"

The improve_component tool generates code suggestions for a specific code component that changed in the PR. It can be invoked manually by commenting on any PR:

/improve_component component_name\n

It can also be triggered interactively by using the analyze tool.

"},{"location":"core-abilities/static_code_analysis/#find-similar-code","title":"Find Similar Code","text":"

The similar code tool retrieves the most similar code components from inside the organization's codebase, or from open-source code.

For example:

Global Search for a method called chat_completion:

"},{"location":"faq/","title":"FAQ","text":"Question: Can Qodo Merge serve as a substitute for a human reviewer? Question: I received an incorrect or irrelevant suggestion. Why? Question: How can I get more tailored suggestions? Question: Will you store my code ? Are you using my code to train models? Question: Can I use my own LLM keys with Qodo Merge?"},{"location":"faq/#answer1","title":"Answer:1","text":"

Qodo Merge is designed to assist, not replace, human reviewers.

Reviewing PRs is a tedious and time-consuming task often seen as a \"chore\". In addition, the longer the PR \u2013 the shorter the relative feedback, since long PRs can overwhelm reviewers, both in terms of technical difficulty, and the actual review time. Qodo Merge aims to address these pain points, and to assist and empower both the PR author and reviewer.

However, Qodo Merge has built-in safeguards to ensure the developer remains in the driver's seat. For example:

  1. Preserves user's original PR header
  2. Places user's description above the AI-generated PR description
  3. Cannot approve PRs; approval remains reviewer's responsibility
  4. The code suggestions are optional, and aim to:
    • Encourage self-review and self-reflection
    • Highlight potential bugs or oversights
    • Enhance code quality and promote best practices

Read more about this issue in our blog

"},{"location":"faq/#answer2","title":"Answer:2","text":"
  • Modern AI models, like Claude 3.5 Sonnet and GPT-4, are improving rapidly but remain imperfect. Users should critically evaluate all suggestions rather than accepting them automatically.
  • AI errors are rare, but possible. A main value of reviewing the code suggestions lies in their high probability of catching mistakes or bugs made by the PR author. We believe it's worth spending 30-60 seconds reviewing suggestions, even if some aren't relevant, as this practice can enhance code quality and prevent bugs in production.

  • The hierarchical structure of the suggestions is designed to help the user to quickly understand them, and to decide which ones are relevant and which are not:

    • Only if the Category header is relevant, the user should move to the summarized suggestion description.
    • Only if the summarized suggestion description is relevant, the user should click on the collapsible, to read the full suggestion description with a code preview example.
  • In addition, we recommend using the extra_instructions field to guide the model toward suggestions that are more relevant to the specific needs of the project.

  • The interactive PR chat also provides an easy way to get more tailored suggestions and feedback from the AI model.
"},{"location":"faq/#answer3","title":"Answer:3","text":"

See here for more information on how to use the extra_instructions and best_practices configuration options, to guide the model to more tailored suggestions.

"},{"location":"faq/#answer4","title":"Answer:4","text":"

No. Qodo Merge's strict privacy policy ensures that your code is not stored or used for training purposes.

For a detailed overview of our data privacy policy, please refer to this link

"},{"location":"faq/#answer5","title":"Answer:5","text":"

When you self-host, you use your own keys.

Qodo Merge Pro with SaaS deployment is a hosted version of Qodo Merge, where Qodo manages the infrastructure and the keys. For enterprise customers, on-prem deployment is also available. Contact us for more information.

"},{"location":"finetuning_benchmark/","title":"Qodo Merge Code Fine-tuning Benchmark","text":"

On coding tasks, the gap between open-source models and top closed-source models such as GPT-4 is significant. In practice, open-source models are unsuitable for most real-world code tasks, and require further fine-tuning to produce acceptable results.

Qodo Merge fine-tuning benchmark aims to benchmark open-source models on their ability to be fine-tuned for a coding task. Specifically, we chose to fine-tune open-source models on the task of analyzing a pull request, and providing useful feedback and code suggestions.

Here are the results:

Model performance:

Model name | Model size [B] | Better than gpt-4 rate, after fine-tuning [%]
DeepSeek 34B-instruct | 34 | 40.7
DeepSeek 34B-base | 34 | 38.2
Phind-34b | 34 | 38
Granite-34B | 34 | 37.6
Codestral-22B-v0.1 | 22 | 32.7
QWEN-1.5-32B | 32 | 29
CodeQwen1.5-7B | 7 | 35.4
Llama-3.1-8B-Instruct | 8 | 35.2
Granite-8b-code-instruct | 8 | 34.2
CodeLlama-7b-hf | 7 | 31.8
Gemma-7B | 7 | 27.2
DeepSeek coder-7b-instruct | 7 | 26.8
Llama-3-8B-Instruct | 8 | 26.8
Mistral-7B-v0.1 | 7 | 16.1

Fine-tuning impact:

Model name | Model size [B] | Fine-tuned | Better than gpt-4 rate [%]
DeepSeek 34B-instruct | 34 | yes | 40.7
DeepSeek 34B-instruct | 34 | no | 3.6
"},{"location":"finetuning_benchmark/#results-analysis","title":"Results analysis","text":"
  • Fine-tuning is a must - without fine-tuning, open-source models provide poor results on most real-world code tasks, which include complicated prompts and lengthy context. We clearly see that without fine-tuning, the DeepSeek model was inferior to GPT-4 96.4% of the time, while after fine-tuning, it is better 40.7% of the time.
  • Always start from a code-dedicated model \u2014 When fine-tuning, always start from a code-dedicated model, and not from a general-usage model. The gaps in downstream results are very big.
  • Don't believe the hype \u2014 newer models, or models from big-tech companies (Llama3, Gemma, Mistral), are not always better for fine-tuning.
  • The best large model - For large 34B code-dedicated models, the gaps when doing proper fine-tuning are small. The current top model is DeepSeek 34B-instruct
  • The best small model - For small 7B code-dedicated models, the gaps when fine-tuning are much larger. CodeQWEN 1.5-7B is by far the best model for fine-tuning.
  • Base vs. instruct - For the top model (DeepSeek), we saw a small advantage when starting from the instruct version. However, we recommend testing both versions on each specific task, as the base model is generally considered more suitable for fine-tuning.
"},{"location":"finetuning_benchmark/#the-dataset","title":"The dataset","text":""},{"location":"finetuning_benchmark/#training-dataset","title":"Training dataset","text":"

Our training dataset comprises 25,000 pull requests, aggregated from permissive license repos. For each pull request, we generated responses for the three main tools of Qodo Merge: Describe, Review and Improve.

On the raw data collected, we employed various automatic and manual cleaning techniques to ensure the outputs were of the highest quality, and suitable for instruct-tuning.

Here are the prompts, and example outputs, used as input-output pairs to fine-tune the models:

Tool | Prompt | Example output
Describe | link | link
Review | link | link
Improve | link | link
"},{"location":"finetuning_benchmark/#evaluation-dataset","title":"Evaluation dataset","text":"
  • For each tool, we aggregated 100 additional examples to be used for evaluation. These examples were not used in the training dataset, and were manually selected to represent diverse real-world use-cases.
  • For each test example, we generated two responses: one from the fine-tuned model, and one from the best code model in the world, gpt-4-turbo-2024-04-09.

  • We used a third LLM to judge which response better answers the prompt, and would more likely be perceived by a human as the better response.

We experimented with three models as judges: gpt-4-turbo-2024-04-09, gpt-4o, and claude-3-opus-20240229. All three produced similar results, with the same ranking order. This strengthens the validity of our testing protocol. The evaluation prompt can be found here

Here is an example of a judge model feedback:

command: improve\nmodel1_score: 9,\nmodel2_score: 6,\nwhy: |\n  Response 1 is better because it provides more actionable and specific suggestions that directly \n  enhance the code's maintainability, performance, and best practices. For example, it suggests \n  using a variable for reusable widget instances and using named routes for navigation, which \n  are practical improvements. In contrast, Response 2 focuses more on general advice and less \n  actionable suggestions, such as changing variable names and adding comments, which are less \n  critical for immediate code improvement.\"\n
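Given many such judge verdicts, the better-than-gpt-4 rate is simply the share of test examples where the fine-tuned model's response was judged better; a small sketch with invented verdicts:

def better_than_gpt4_rate(verdicts):
    # verdicts: list of (model1_score, model2_score) pairs, where model1 is the fine-tuned model
    wins = sum(1 for m1, m2 in verdicts if m1 > m2)
    return 100.0 * wins / len(verdicts)

# Three invented verdicts: the fine-tuned model wins once out of three, i.e. 33.3%
print(round(better_than_gpt4_rate([(9, 6), (4, 7), (5, 5)]), 1))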
"},{"location":"installation/","title":"Installation","text":""},{"location":"installation/#self-hosted-qodo-merge","title":"Self-hosted Qodo Merge","text":"

If you choose to host your own Qodo Merge, you first need to acquire two tokens:

  1. An OpenAI key from here, with access to GPT-4 (or a key for other language models, if you prefer).
  2. A GitHub\\GitLab\\BitBucket personal access token (classic), with the repo scope. [GitHub from here]

There are several ways to use self-hosted Qodo Merge:

  • Locally
  • GitHub
  • GitLab
  • BitBucket
  • Azure DevOps
"},{"location":"installation/#qodo-merge-pro","title":"Qodo Merge Pro \ud83d\udc8e","text":"

Qodo Merge Pro, an app hosted by CodiumAI for GitHub\\GitLab\\BitBucket, is also available. With Qodo Merge Pro, installation is as simple as signing up and adding the Qodo Merge app to your relevant repo. See here for more details.

"},{"location":"installation/azure/","title":"Azure DevOps","text":""},{"location":"installation/azure/#azure-devops-pipeline","title":"Azure DevOps Pipeline","text":"

You can use a pre-built Action Docker image to run Qodo Merge as an Azure DevOps pipeline. Add the following file to your repository under azure-pipelines.yml:

# Opt out of CI triggers\ntrigger: none\n\n# Configure PR trigger\npr:\n  branches:\n    include:\n    - '*'\n  autoCancel: true\n  drafts: false\n\nstages:\n- stage: pr_agent\n  displayName: 'PR Agent Stage'\n  jobs:\n  - job: pr_agent_job\n    displayName: 'PR Agent Job'\n    pool:\n      vmImage: 'ubuntu-latest'\n    container:\n      image: codiumai/pr-agent:latest\n      options: --entrypoint \"\"\n    variables:\n      - group: pr_agent\n    steps:\n    - script: |\n        echo \"Running PR Agent action step\"\n\n        # Construct PR_URL\n        PR_URL=\"${SYSTEM_COLLECTIONURI}${SYSTEM_TEAMPROJECT}/_git/${BUILD_REPOSITORY_NAME}/pullrequest/${SYSTEM_PULLREQUEST_PULLREQUESTID}\"\n        echo \"PR_URL=$PR_URL\"\n\n        # Extract organization URL from System.CollectionUri\n        ORG_URL=$(echo \"$(System.CollectionUri)\" | sed 's/\\/$//') # Remove trailing slash if present\n        echo \"Organization URL: $ORG_URL\"\n\n        export azure_devops__org=\"$ORG_URL\"\n        export config__git_provider=\"azure\"\n\n        pr-agent --pr_url=\"$PR_URL\" describe\n        pr-agent --pr_url=\"$PR_URL\" review\n        pr-agent --pr_url=\"$PR_URL\" improve\n      env:\n        azure_devops__pat: $(azure_devops_pat)\n        openai__key: $(OPENAI_KEY)\n      displayName: 'Run Qodo Merge'\n
This script will run Qodo Merge on every new pull request, with the improve, review, and describe commands. Note that you need to export the azure_devops__pat and OPENAI_KEY variables in the Azure DevOps pipeline settings (Pipelines -> Library -> + Variable group):

Make sure to give pipeline permissions to the pr_agent variable group.

"},{"location":"installation/azure/#azure-devops-from-cli","title":"Azure DevOps from CLI","text":"

To use Azure DevOps provider use the following settings in configuration.toml:

[config]\ngit_provider=\"azure\"\n

The Azure DevOps provider supports PAT token or DefaultAzureCredential authentication. A PAT is faster to create, but has a built-in expiration date, and will use the user identity for API calls. With DefaultAzureCredential you can use a managed identity or a service principal, which are more secure and will create a separate ADO user identity (via AAD) for the agent.

If PAT was chosen, you can assign the value in .secrets.toml. If DefaultAzureCredential was chosen, you can assign the additional env vars like AZURE_CLIENT_SECRET directly, or use managed identity/az cli (for local development) without any additional configuration. In any case, the 'org' value must be assigned in .secrets.toml:

[azure_devops]\norg = \"https://dev.azure.com/YOUR_ORGANIZATION/\"\n# pat = \"YOUR_PAT_TOKEN\" needed only if using PAT for authentication\n

"},{"location":"installation/azure/#azure-devops-webhook","title":"Azure DevOps Webhook","text":"

To trigger from an Azure webhook, you need to manually add a webhook. Use the \"Pull request created\" type to trigger a review, or \"Pull request commented on\" to trigger any supported command via a / comment on the relevant PR. Note that for the \"Pull request commented on\" trigger, only API v2.0 is supported.

For webhook security, create a dedicated username/password pair and configure the webhook username and password on both the server and the Azure DevOps webhook. These will be sent as basic Auth data by the webhook with each request:

[azure_devops_server]\nwebhook_username = \"<basic auth user>\"\nwebhook_password = \"<basic auth password>\"\n

Ensure that the webhook endpoint is only accessible over HTTPS to mitigate the risk of credential interception when using basic authentication.

"},{"location":"installation/bitbucket/","title":"BitBucket","text":""},{"location":"installation/bitbucket/#run-as-a-bitbucket-pipeline","title":"Run as a Bitbucket Pipeline","text":"

You can use the Bitbucket Pipeline system to run Qodo Merge on every pull request open or update.

  1. Add the following file to your repository as bitbucket_pipelines.yml
pipelines:\n    pull-requests:\n      '**':\n        - step:\n            name: PR Agent Review\n            image: python:3.10\n            services:\n              - docker\n            script:\n              - docker run -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=https://bitbucket.org/$BITBUCKET_WORKSPACE/$BITBUCKET_REPO_SLUG/pull-requests/$BITBUCKET_PR_ID review\n
  2. Add the following secure variables to your repository under Repository settings > Pipelines > Repository variables. OPENAI_API_KEY: <your key> BITBUCKET_BEARER_TOKEN: <your token>

You can get a Bitbucket token for your repository by following Repository Settings -> Security -> Access Tokens.

Note that comments on a PR are not supported in Bitbucket Pipeline.

"},{"location":"installation/bitbucket/#run-using-codiumai-hosted-bitbucket-app","title":"Run using CodiumAI-hosted Bitbucket app \ud83d\udc8e","text":"

Please visit Qodo Merge Pro if you're interested in a hosted BitBucket app solution that provides full functionality including PR reviews and comment handling. It's based on the bitbucket_app.py implementation.

"},{"location":"installation/bitbucket/#bitbucket-server-and-data-center","title":"Bitbucket Server and Data Center","text":"

Log in to your on-prem instance of Bitbucket with your service account username and password. Navigate to Manage account, HTTP Access tokens, Create Token. Generate the token and add it to .secrets.toml under the bitbucket_server section

[bitbucket_server]\nbearer_token = \"<your key>\"\n
"},{"location":"installation/bitbucket/#run-it-as-cli","title":"Run it as CLI","text":"

Modify configuration.toml:

git_provider=\"bitbucket_server\"\n

and pass the Pull request URL:

python cli.py --pr_url https://git.onpreminstanceofbitbucket.com/projects/PROJECT/repos/REPO/pull-requests/1 review\n

"},{"location":"installation/bitbucket/#run-it-as-service","title":"Run it as service","text":"

To run Qodo Merge as a webhook, build the docker image:

docker build . -t codiumai/pr-agent:bitbucket_server_webhook --target bitbucket_server_webhook -f docker/Dockerfile\ndocker push codiumai/pr-agent:bitbucket_server_webhook  # Push to your Docker repository\n

Navigate to Projects or Repositories, Settings, Webhooks, Create Webhook. Fill in the name and URL, set Authentication to None, and select the Pull Request Opened checkbox to receive that event as a webhook.

The URL should end with /webhook, for example: https://domain.com/webhook

"},{"location":"installation/github/","title":"GitHub","text":""},{"location":"installation/github/#run-as-a-github-action","title":"Run as a GitHub Action","text":"

You can use our pre-built Github Action Docker image to run Qodo Merge as a Github Action.

1) Add the following file to your repository under .github/workflows/pr_agent.yml:

on:\n  pull_request:\n    types: [opened, reopened, ready_for_review]\n  issue_comment:\njobs:\n  pr_agent_job:\n    if: ${{ github.event.sender.type != 'Bot' }}\n    runs-on: ubuntu-latest\n    permissions:\n      issues: write\n      pull-requests: write\n      contents: write\n    name: Run pr agent on every pull request, respond to user comments\n    steps:\n      - name: PR Agent action step\n        id: pragent\n        uses: Codium-ai/pr-agent@main\n        env:\n          OPENAI_KEY: ${{ secrets.OPENAI_KEY }}\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n

2) Add the following secret to your repository under Settings > Secrets and variables > Actions > New repository secret > Add secret:

Name = OPENAI_KEY\nSecret = <your key>\n

The GITHUB_TOKEN secret is automatically created by GitHub.

3) Merge this change to your main branch. When you open your next PR, you should see a comment from github-actions bot with a review of your PR, and instructions on how to use the rest of the tools.

4) You may configure Qodo Merge by adding environment variables under the env section corresponding to any configurable property in the configuration file. Some examples:

      env:\n        # ... previous environment values\n        OPENAI.ORG: \"<Your organization name under your OpenAI account>\"\n        PR_REVIEWER.REQUIRE_TESTS_REVIEW: \"false\" # Disable tests review\n        PR_CODE_SUGGESTIONS.NUM_CODE_SUGGESTIONS: 6 # Increase number of code suggestions\n
See detailed usage instructions in the USAGE GUIDE

"},{"location":"installation/github/#using-a-specific-release","title":"Using a specific release","text":"

If you want to pin your action to a specific release (v0.23 for example) for stability reasons, use:

...\n    steps:\n      - name: PR Agent action step\n        id: pragent\n        uses: docker://codiumai/pr-agent:0.23-github_action\n...\n

For enhanced security, you can also specify the Docker image by its digest:

...\n    steps:\n      - name: PR Agent action step\n        id: pragent\n        uses: docker://codiumai/pr-agent@sha256:14165e525678ace7d9b51cda8652c2d74abb4e1d76b57c4a6ccaeba84663cc64\n...\n

"},{"location":"installation/github/#action-for-github-enterprise-server","title":"Action for GitHub enterprise server","text":"

To use the action with a GitHub enterprise server, add an environment variable GITHUB.BASE_URL with the API URL of your GitHub server.

For example, if your GitHub server is at https://github.mycompany.com, add the following to your workflow file:

      env:\n        # ... previous environment values\n        GITHUB.BASE_URL: \"https://github.mycompany.com/api/v3\"\n

"},{"location":"installation/github/#run-as-a-github-app","title":"Run as a GitHub App","text":"

Running Qodo Merge as a GitHub App allows you to automate the review process on your private or public repositories.

1) Create a GitHub App from the Github Developer Portal.

  • Set the following permissions:
    • Pull requests: Read & write
    • Issue comment: Read & write
    • Metadata: Read-only
    • Contents: Read-only
  • Set the following events:
    • Issue comment
    • Pull request
    • Push (if you need to enable triggering on PR update)

2) Generate a random secret for your app, and save it for later. For example, you can use:

WEBHOOK_SECRET=$(python -c \"import secrets; print(secrets.token_hex(10))\")\n

3) Acquire the following pieces of information from your app's settings page:

  • App private key (click \"Generate a private key\" and save the file)
  • App ID

4) Clone this repository:

git clone https://github.com/Codium-ai/pr-agent.git\n

5) Copy the secrets template file and fill in the following:

cp pr_agent/settings/.secrets_template.toml pr_agent/settings/.secrets.toml\n# Edit .secrets.toml file\n
  • Your OpenAI key.
  • Copy your app's private key to the private_key field.
  • Copy your app's ID to the app_id field.
  • Copy your app's webhook secret to the webhook_secret field.
  • Set deployment_type to 'app' in configuration.toml

    The .secrets.toml file is not copied to the Docker image by default, and is only used for local development. If you want to use the .secrets.toml file in your Docker image, you can remove it from the .dockerignore file. In most production environments, you would inject the secrets file as environment variables or as mounted volumes. For example, in order to inject a secrets file as a volume in a Kubernetes environment you can update your pod spec to include the following, assuming you have a secret named pr-agent-settings with a key named .secrets.toml:

           volumes:\n        - name: settings-volume\n          secret:\n            secretName: pr-agent-settings\n// ...\n       containers:\n// ...\n          volumeMounts:\n            - mountPath: /app/pr_agent/settings_prod\n              name: settings-volume\n

    Another option is to set the secrets as environment variables in your deployment environment, for example OPENAI.KEY and GITHUB.USER_TOKEN.

6) Build a Docker image for the app and optionally push it to a Docker repository. We'll use Dockerhub as an example:

```\ndocker build . -t codiumai/pr-agent:github_app --target github_app -f docker/Dockerfile\ndocker push codiumai/pr-agent:github_app  # Push to your Docker repository\n```\n
  7. Host the app using a server, serverless function, or container environment. Alternatively, for development and debugging, you may use tools like smee.io to forward webhooks to your local machine. You can check Deploy as a Lambda Function

  8. Go back to your app's settings, and set the following:

    • Webhook URL: The URL of your app's server or the URL of the smee.io channel.

    • Webhook secret: The secret you generated earlier.

  9. Install the app by navigating to the \"Install App\" tab and selecting your desired repositories.

Note: When running Qodo Merge from GitHub app, the default configuration file (configuration.toml) will be loaded. However, you can override the default tool parameters by uploading a local configuration file .pr_agent.toml For more information please check out the USAGE GUIDE

"},{"location":"installation/github/#deploy-as-a-lambda-function","title":"Deploy as a Lambda Function","text":"

Note that since AWS Lambda env vars cannot have \".\" in the name, you can replace each \".\" in an env variable with \"__\". For example: GITHUB.WEBHOOK_SECRET --> GITHUB__WEBHOOK_SECRET
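For instance, a tiny helper that applies this naming rule (illustrative):

def to_lambda_env_name(key):
    # AWS Lambda env var names cannot contain '.', so map each '.' to '__'
    return key.replace('.', '__')

print(to_lambda_env_name('GITHUB.WEBHOOK_SECRET'))  # GITHUB__WEBHOOK_SECRET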

  1. Follow steps 1-5 from here.
  2. Build a docker image that can be used as a lambda function: docker buildx build --platform=linux/amd64 . -t codiumai/pr-agent:serverless -f docker/Dockerfile.lambda
  3. Push image to ECR
    docker tag codiumai/pr-agent:serverless <AWS_ACCOUNT>.dkr.ecr.<AWS_REGION>.amazonaws.com/codiumai/pr-agent:serverless\ndocker push <AWS_ACCOUNT>.dkr.ecr.<AWS_REGION>.amazonaws.com/codiumai/pr-agent:serverless\n
  4. Create a lambda function that uses the uploaded image. Set the lambda timeout to be at least 3m.
  5. Configure the lambda function to have a Function URL.
  6. In the environment variables of the Lambda function, specify AZURE_DEVOPS_CACHE_DIR to a writable location such as /tmp. (see link)
  7. Go back to steps 8-9 of Method 5 with the function url as your Webhook URL. The Webhook URL would look like https://<LAMBDA_FUNCTION_URL>/api/v1/github_webhooks
"},{"location":"installation/github/#aws-codecommit-setup","title":"AWS CodeCommit Setup","text":"

Not all features have been added to CodeCommit yet. As of right now, CodeCommit has been implemented to run the Qodo Merge CLI on the command line, using AWS credentials stored in environment variables. (More features will be added in the future.) The following is a set of instructions to have Qodo Merge do a review of your CodeCommit pull request from the command line:

  1. Create an IAM user that you will use to read CodeCommit pull requests and post comments
    • Note: That user should have CLI access only, not Console access
  2. Add IAM permissions to that user, to allow access to CodeCommit (see IAM Role example below)
  3. Generate an Access Key for your IAM user
  4. Set the Access Key and Secret using environment variables (see Access Key example below)
  5. Set the git_provider value to codecommit in the pr_agent/settings/configuration.toml settings file
  6. Set the PYTHONPATH to include your pr-agent project directory
    • Option A: Add PYTHONPATH=\"/PATH/TO/PROJECTS/pr-agent to your .env file
    • Option B: Set PYTHONPATH and run the CLI in one command, for example:
      • PYTHONPATH=\"/PATH/TO/PROJECTS/pr-agent python pr_agent/cli.py [--ARGS]
"},{"location":"installation/github/#aws-codecommit-iam-role-example","title":"AWS CodeCommit IAM Role Example","text":"

Example IAM permissions to that user to allow access to CodeCommit:

  • Note: The following is a working example of IAM permissions that has read access to the repositories and write access to allow posting comments
  • Note: If you only want pr-agent to review your pull requests, you can tighten the IAM permissions further; however, this IAM example will work, and allow pr-agent to post comments to the PR
  • Note: You may want to replace the \"Resource\": \"*\" with your list of repos, to limit access to only those repos
{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"codecommit:BatchDescribe*\",\n                \"codecommit:BatchGet*\",\n                \"codecommit:Describe*\",\n                \"codecommit:EvaluatePullRequestApprovalRules\",\n                \"codecommit:Get*\",\n                \"codecommit:List*\",\n                \"codecommit:PostComment*\",\n                \"codecommit:PutCommentReaction\",\n                \"codecommit:UpdatePullRequestDescription\",\n                \"codecommit:UpdatePullRequestTitle\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n
"},{"location":"installation/github/#aws-codecommit-access-key-and-secret","title":"AWS CodeCommit Access Key and Secret","text":"

Example of setting the Access Key and Secret using environment variables:

export AWS_ACCESS_KEY_ID=\"XXXXXXXXXXXXXXXX\"\nexport AWS_SECRET_ACCESS_KEY=\"XXXXXXXXXXXXXXXX\"\nexport AWS_DEFAULT_REGION=\"us-east-1\"\n
"},{"location":"installation/github/#aws-codecommit-cli-example","title":"AWS CodeCommit CLI Example","text":"

After you set up AWS CodeCommit using the instructions above, here is an example CLI run that tells pr-agent to review a given pull request. (Replace the PYTHONPATH and PR URL in the example with your own values.)

PYTHONPATH=\"/PATH/TO/PROJECTS/pr-agent\" python pr_agent/cli.py \\\n  --pr_url https://us-east-1.console.aws.amazon.com/codesuite/codecommit/repositories/MY_REPO_NAME/pull-requests/321 \\\n  review\n
"},{"location":"installation/gitlab/","title":"GitLab","text":""},{"location":"installation/gitlab/#run-as-a-gitlab-pipeline","title":"Run as a GitLab Pipeline","text":"

You can use the pre-built Qodo Merge Docker image to run Qodo Merge as a GitLab pipeline. This is a simple way to get started with Qodo Merge without setting up your own server.

(1) Add the following file to your repository under .gitlab-ci.yml:

stages:\n  - pr_agent\n\npr_agent_job:\n  stage: pr_agent\n  image:\n    name: codiumai/pr-agent:latest\n    entrypoint: [\"\"]\n  script:\n    - cd /app\n    - echo \"Running PR Agent action step\"\n    - export MR_URL=\"$CI_MERGE_REQUEST_PROJECT_URL/merge_requests/$CI_MERGE_REQUEST_IID\"\n    - echo \"MR_URL=$MR_URL\"\n    - export gitlab__url=$CI_SERVER_PROTOCOL://$CI_SERVER_FQDN\n    - export gitlab__PERSONAL_ACCESS_TOKEN=$GITLAB_PERSONAL_ACCESS_TOKEN\n    - export config__git_provider=\"gitlab\"\n    - export openai__key=$OPENAI_KEY\n    - python -m pr_agent.cli --pr_url=\"$MR_URL\" describe\n    - python -m pr_agent.cli --pr_url=\"$MR_URL\" review\n    - python -m pr_agent.cli --pr_url=\"$MR_URL\" improve\n  rules:\n    - if: '$CI_PIPELINE_SOURCE == \"merge_request_event\"'\n
This script will run Qodo Merge on every new merge request. You can modify the rules section to run Qodo Merge on different events. You can also modify the script section to run different Qodo Merge commands, or to pass different parameters by exporting different environment variables.
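For example, additional configuration options can be passed to the commands as environment variables in the script section, using the same section__key naming convention shown above (the option below is only an illustration):

    - export pr_reviewer__extra_instructions=\"Focus on security-related issues\"\n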

(2) Add the following masked variables to your GitLab repository (CI/CD -> Variables):

  • GITLAB_PERSONAL_ACCESS_TOKEN: Your GitLab personal access token.

  • OPENAI_KEY: Your OpenAI key.

Note that if your base branches are not protected, don't set the variables as protected, since the pipeline will not have access to them.

"},{"location":"installation/gitlab/#run-a-gitlab-webhook-server","title":"Run a GitLab webhook server","text":"
  1. From the GitLab workspace or group, create an access token. Enable the \"api\" scope only.

  2. Generate a random secret for your app, and save it for later. For example, you can use:

WEBHOOK_SECRET=$(python -c \"import secrets; print(secrets.token_hex(10))\")\n
  3. Follow steps 4-7 here to build the Docker image, set up a secrets file, and deploy on your own server.

  4. In the secrets file, fill in the following (a minimal sketch appears after this list):

    • Your OpenAI key.
    • In the [gitlab] section, fill in personal_access_token and shared_secret. The access token can be a personal access token, or a group or project access token.
    • Set deployment_type to 'gitlab' in configuration.toml
  5. Create a webhook in GitLab. Set the URL to http[s]://<PR_AGENT_HOSTNAME>/webhook. Set the secret token to the generated secret from step 2. In the \"Trigger\" section, check the \u2018comments\u2019 and \u2018merge request events\u2019 boxes.

  6. Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.
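As referenced in step 4, here is a minimal sketch of the relevant secrets file entries (verify the exact layout against pr_agent/settings/.secrets_template.toml):

[openai]\nkey = \"<your OpenAI key>\"\n\n[gitlab]\npersonal_access_token = \"<your access token>\"\nshared_secret = \"<the secret generated in step 2>\"\n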

"},{"location":"installation/locally/","title":"Locally","text":""},{"location":"installation/locally/#using-pip-package","title":"Using pip package","text":"

Install the package:

pip install pr-agent\n

Then run the relevant tool with the script below. Make sure to fill in the required parameters (user_token, openai_key, pr_url, command):

from pr_agent import cli\nfrom pr_agent.config_loader import get_settings\n\ndef main():\n    # Fill in the following values\n    provider = \"github\" # github/gitlab/bitbucket/azure_devops\n    user_token = \"...\"  #  user token\n    openai_key = \"...\"  # OpenAI key\n    pr_url = \"...\"      # PR URL, for example 'https://github.com/Codium-ai/pr-agent/pull/809'\n    command = \"/review\" # Command to run (e.g. '/review', '/describe', '/ask=\"What is the purpose of this PR?\"', ...)\n\n    # Setting the configurations\n    get_settings().set(\"CONFIG.git_provider\", provider)\n    get_settings().set(\"openai.key\", openai_key)\n    get_settings().set(\"github.user_token\", user_token)\n\n    # Run the command. Feedback will appear in GitHub PR comments\n    cli.run_command(pr_url, command)\n\n\nif __name__ == '__main__':\n    main()\n
"},{"location":"installation/locally/#using-docker-image","title":"Using Docker image","text":"

A list of the relevant tools can be found in the tools guide.

To invoke a tool (for example review), you can run directly from the Docker image. Here's how:

  • For GitHub:

    docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review\n
    If you are using GitHub Enterprise Server, you need to specify the custom URL as a variable. For example, if your GitHub server is at https://github.mycompany.com, add the following to the command:
    -e GITHUB.BASE_URL=https://github.mycompany.com/api/v3\n

  • For GitLab:

    docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review\n

    If you have a dedicated GitLab instance, you need to specify the custom URL as a variable:

    -e GITLAB.URL=<your gitlab instance url> \n

  • For BitBucket:

    docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review\n

For other git providers, update CONFIG.GIT_PROVIDER accordingly, and check the pr_agent/settings/.secrets_template.toml file for the expected names and values of the environment variables.

"},{"location":"installation/locally/#run-from-source","title":"Run from source","text":"
  1. Clone this repository:
git clone https://github.com/Codium-ai/pr-agent.git\n
  2. Navigate to the /pr-agent folder and install the requirements in your favorite virtual environment:
pip install -e .\n

Note: If you get an error related to Rust during dependency installation, make sure Rust is installed and in your PATH (instructions: https://rustup.rs).

  3. Copy the secrets template file and fill in your OpenAI key and your GitHub user token:
cp pr_agent/settings/.secrets_template.toml pr_agent/settings/.secrets.toml\nchmod 600 pr_agent/settings/.secrets.toml\n# Edit .secrets.toml file\n
  4. Run the cli.py script:
python3 -m pr_agent.cli --pr_url <pr_url> review\npython3 -m pr_agent.cli --pr_url <pr_url> ask <your question>\npython3 -m pr_agent.cli --pr_url <pr_url> describe\npython3 -m pr_agent.cli --pr_url <pr_url> improve\npython3 -m pr_agent.cli --pr_url <pr_url> add_docs\npython3 -m pr_agent.cli --pr_url <pr_url> generate_labels\npython3 -m pr_agent.cli --issue_url <issue_url> similar_issue\n...\n

[Optional] Add the pr_agent folder to your PYTHONPATH

export PYTHONPATH=$PYTHONPATH:<PATH to pr_agent folder>\n

"},{"location":"installation/pr_agent_pro/","title":"\ud83d\udc8e Qodo Merge Pro","text":""},{"location":"installation/pr_agent_pro/#getting-started-with-qodo-merge-pro","title":"Getting Started with Qodo Merge Pro","text":"

Qodo Merge Pro is a versatile application compatible with GitHub, GitLab, and BitBucket, hosted by CodiumAI. See here for more details about the benefits of using Qodo Merge Pro.

Interested parties can subscribe to Qodo Merge Pro through the following link. After subscribing, you are granted the ability to easily install the application across any of your repositories.

Each user who wants to use Qodo Merge Pro needs to buy a seat. Initially, CodiumAI offers a two-week trial period at no cost, after which continued access requires each user to secure a personal seat. Once a user acquires a seat, they gain the flexibility to use Qodo Merge Pro across any repository where it was enabled.

Users without a purchased seat who interact with a repository featuring Qodo Merge Pro are entitled to receive up to five complimentary feedback responses. Beyond this limit, Qodo Merge Pro will cease to respond to their inquiries unless a seat is purchased.

"},{"location":"installation/pr_agent_pro/#install-qodo-merge-pro-for-github-enterprise-server","title":"Install Qodo Merge Pro for GitHub Enterprise Server","text":"

To use Qodo Merge Pro application on your private GitHub Enterprise Server, you will need to contact us for starting an Enterprise trial.

"},{"location":"installation/pr_agent_pro/#install-qodo-merge-pro-for-gitlab-teams-enterprise","title":"Install Qodo Merge Pro for GitLab (Teams & Enterprise)","text":"

Since the GitLab platform does not support apps, installing Qodo Merge Pro for GitLab is a bit more involved and requires the following steps:

"},{"location":"installation/pr_agent_pro/#step-1","title":"Step 1","text":"

Acquire a personal, project or group level access token. Enable the \u201capi\u201d scope in order to allow Qodo Merge to read pull requests, comment and respond to requests.

Store the token in a safe place; you won\u2019t be able to access it again after it is generated.

"},{"location":"installation/pr_agent_pro/#step-2","title":"Step 2","text":"

Generate a shared secret and link it to the access token. Browse to https://register.gitlab.pr-agent.codium.ai. Fill in your generated GitLab token and your company or personal name in the appropriate fields and click \"Submit\".

You should see \"Success!\" displayed above the Submit button, and a shared secret will be generated. Store it in a safe place; you won\u2019t be able to access it again after it is generated.

"},{"location":"installation/pr_agent_pro/#step-3","title":"Step 3","text":"

Install a webhook for your repository or group by clicking \u201cWebhooks\u201d in the settings menu. Click the \u201cAdd new webhook\u201d button.

In the webhook definition form, fill in the following fields:

  • URL: https://pro.gitlab.pr-agent.codium.ai/webhook
  • Secret token: Your CodiumAI key
  • Trigger: Check the \u2018comments\u2019 and \u2018merge request events\u2019 boxes.
  • Enable SSL verification: Check the box.

"},{"location":"installation/pr_agent_pro/#step-4","title":"Step 4","text":"

You\u2019re all set!

Open a new merge request or add a MR comment with one of Qodo Merge\u2019s commands such as /review, /describe or /improve.

"},{"location":"overview/","title":"Overview","text":"

Qodo Merge is an open-source tool to help efficiently review and handle pull requests.

  • See the Installation Guide for instructions on installing and running the tool on different git platforms.

  • See the Usage Guide for instructions on running the Qodo Merge commands via different interfaces, including CLI, online usage, or by automatically triggering them when a new PR is opened.

  • See the Tools Guide for a detailed description of the different tools.

"},{"location":"overview/#qodo-merge-docs-smart-search","title":"Qodo Merge Docs Smart Search","text":"

To search the documentation site using natural language:

1) Comment /help \"your question\" in either:

  • A pull request where Qodo Merge is installed
  • A PR Chat

2) Qodo Merge will respond with an answer that includes relevant documentation links.
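For example (the question below is only an illustration):

/help \"How can I enable custom labels?\"\n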

"},{"location":"overview/#qodo-merge-features","title":"Qodo Merge Features","text":"

Qodo Merge offers extensive pull request functionalities across various git providers.

GitHub Gitlab Bitbucket Azure DevOps TOOLS Review \u2705 \u2705 \u2705 \u2705 \u2b91 Incremental \u2705 Ask \u2705 \u2705 \u2705 \u2705 Describe \u2705 \u2705 \u2705 \u2705 \u2b91 Inline file summary \ud83d\udc8e \u2705 \u2705 \u2705 Improve \u2705 \u2705 \u2705 \u2705 \u2b91 Extended \u2705 \u2705 \u2705 \u2705 Custom Prompt \ud83d\udc8e \u2705 \u2705 \u2705 \u2705 Reflect and Review \u2705 \u2705 \u2705 \u2705 Update CHANGELOG.md \u2705 \u2705 \u2705 \ufe0f Find Similar Issue \u2705 \ufe0f Add PR Documentation \ud83d\udc8e \u2705 \u2705 \u2705 Generate Custom Labels \ud83d\udc8e \u2705 \u2705 \u2705 Analyze PR Components \ud83d\udc8e \u2705 \u2705 \u2705 \ufe0f USAGE CLI \u2705 \u2705 \u2705 \u2705 App / webhook \u2705 \u2705 \u2705 \u2705 Actions \u2705 \ufe0f CORE PR compression \u2705 \u2705 \u2705 \u2705 Repo language prioritization \u2705 \u2705 \u2705 \u2705 Adaptive and token-aware file patch fitting \u2705 \u2705 \u2705 \u2705 Multiple models support \u2705 \u2705 \u2705 \u2705 Incremental PR review \u2705 Static code analysis \ud83d\udc8e \u2705 \u2705 \u2705 \u2705 Multiple configuration options \ud83d\udc8e \u2705 \u2705 \u2705 \u2705

\ud83d\udc8e marks a feature available only in Qodo Merge Pro

"},{"location":"overview/#example-results","title":"Example Results","text":""},{"location":"overview/#describe","title":"/describe","text":""},{"location":"overview/#review","title":"/review","text":""},{"location":"overview/#improve","title":"/improve","text":""},{"location":"overview/#generate_labels","title":"/generate_labels","text":""},{"location":"overview/#how-it-works","title":"How it Works","text":"

The following diagram illustrates Qodo Merge tools and their flow:

Check out the PR Compression strategy page for more details on how we convert a code diff to a manageable LLM prompt

"},{"location":"overview/data_privacy/","title":"Data Privacy","text":""},{"location":"overview/data_privacy/#self-hosted-qodo-merge","title":"Self-hosted Qodo Merge","text":"
  • If you self-host Qodo Merge with your OpenAI (or other LLM provider) API key, it is between you and the provider. We don't send your code data to Qodo Merge servers.
"},{"location":"overview/data_privacy/#qodo-merge-pro","title":"Qodo Merge Pro \ud83d\udc8e","text":"
  • When using Qodo Merge Pro \ud83d\udc8e, hosted by CodiumAI, we will not store any of your data, nor will we use it for training. You will also benefit from an OpenAI account with zero data retention.

  • For certain clients, CodiumAI-hosted Qodo Merge Pro will use CodiumAI\u2019s proprietary models. If this is the case, you will be notified.

  • No passive collection of Code and Pull Requests\u2019 data \u2014 Qodo Merge will be active only when you invoke it, and it will then extract and analyze only data relevant to the executed command and queried pull request.

"},{"location":"overview/data_privacy/#qodo-merge-chrome-extension","title":"Qodo Merge Chrome extension","text":"
  • The Qodo Merge Chrome extension will not send your code to any external servers.
"},{"location":"overview/pr_agent_pro/","title":"\ud83d\udc8e Qodo Merge Pro","text":""},{"location":"overview/pr_agent_pro/#overview","title":"Overview","text":"

Qodo Merge Pro is a hosted version of Qodo Merge, provided by Qodo. A complimentary two-week trial is offered, followed by a monthly subscription fee. Qodo Merge Pro is designed for companies and teams that require additional features and capabilities. It provides the following benefits:

  1. Fully managed - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the Qodo Merge app to your GitHub/GitLab/BitBucket repo.

  2. Improved privacy - No data will be stored or used to train models. Qodo Merge Pro will employ zero data retention, and will use OpenAI and Claude accounts with zero data retention.

  3. Improved support - Qodo Merge Pro users will receive priority support, and will be able to request new features and capabilities.

  4. Supporting self-hosted git servers - Qodo Merge Pro can be installed on GitHub Enterprise Server, GitLab, and BitBucket. For more information, see the installation guide.

  5. PR Chat - Qodo Merge Pro allows you to engage in private chat about your pull requests on private repositories.

"},{"location":"overview/pr_agent_pro/#additional-features","title":"Additional features","text":"

Here are some of the additional features and capabilities that Qodo Merge Pro offers:

  • Model selection - Choose the model that best fits your needs, among top models like GPT4 and Claude-Sonnet-3.5
  • Global and wiki configuration - Control configurations for many repositories from a single location; Edit configuration of a single repo without committing code
  • Apply suggestions - Generate committable code from the relevant suggestions interactively by clicking on a checkbox
  • Suggestions impact - Automatically mark suggestions that were implemented by the user (either directly in GitHub, or indirectly in the IDE) to enable tracking of the impact of the suggestions
  • CI feedback - Automatically analyze failed CI checks on GitHub and provide actionable feedback in the PR conversation, helping to resolve issues quickly
  • Advanced usage statistics - Qodo Merge Pro offers detailed statistics at user, repository, and company levels, including metrics about Qodo Merge usage, and also general statistics and insights
  • Incorporating companies' best practices - Use the companies' best practices as reference to increase the effectiveness and the relevance of the code suggestions
  • Interactive triggering - Interactively apply different tools via the analyze command
  • Custom labels - Define custom labels for Qodo Merge to assign to the PR
"},{"location":"overview/pr_agent_pro/#additional-tools","title":"Additional tools","text":"

Here are additional tools that are available only for Qodo Merge Pro users:

  • Custom Prompt Suggestions - Generate code suggestions based on custom prompts from the user
  • Analyze PR components - Identify the components that changed in the PR, and enable to interactively apply different tools to them
  • Tests - Generate tests for code components that changed in the PR
  • PR documentation - Generate docstring for code components that changed in the PR
  • Improve Component - Generate code suggestions for code components that changed in the PR
  • Similar code search - Search for similar code in the repository, organization, or entire GitHub
"},{"location":"overview/pr_agent_pro/#supported-languages","title":"Supported languages","text":"

Qodo Merge Pro leverages the world's leading code models - Claude 3.5 Sonnet and GPT-4. As a result, its primary tools such as describe, review, and improve, as well as the PR-chat feature, support virtually all programming languages.

For specialized commands that require static code analysis, Qodo Merge Pro offers support for specific languages. For more details about features that require static code analysis, please refer to the documentation.

"},{"location":"tools/","title":"Tools","text":"

Here is a list of Qodo Merge tools, each with a dedicated page that explains how to use it:

  • PR Description (/describe) - Automatically generating PR description - title, type, summary, code walkthrough and labels
  • PR Review (/review) - Adjustable feedback about the PR, possible issues, security concerns, review effort and more
  • Code Suggestions (/improve) - Code suggestions for improving the PR
  • Question Answering (/ask ...) - Answering free-text questions about the PR, or on specific code lines
  • Update Changelog (/update_changelog) - Automatically updating the CHANGELOG.md file with the PR changes
  • Find Similar Issue (/similar_issue) - Automatically retrieves and presents similar issues
  • Help (/help) - Provides a list of all the available tools. Also enables to trigger them interactively (\ud83d\udc8e)
  • \ud83d\udc8e Add Documentation (/add_docs) - Generates documentation to methods/functions/classes that changed in the PR
  • \ud83d\udc8e Generate Custom Labels (/generate_labels) - Generates custom labels for the PR, based on specific guidelines defined by the user
  • \ud83d\udc8e Analyze (/analyze) - Identify code components that changed in the PR, and enables to interactively generate tests, docs, and code suggestions for each component
  • \ud83d\udc8e Custom Prompt (/custom_prompt) - Automatically generates custom suggestions for improving the PR code, based on specific guidelines defined by the user
  • \ud83d\udc8e Generate Tests (/test component_name) - Automatically generates unit tests for a selected component, based on the PR code changes
  • \ud83d\udc8e Improve Component (/improve_component component_name) - Generates code suggestions for a specific code component that changed in the PR
  • \ud83d\udc8e CI Feedback (/checks ci_job) - Automatically generates feedback and analysis for a failed CI job

Note that the tools marked with \ud83d\udc8e are available only for Qodo Merge Pro users.

"},{"location":"tools/analyze/","title":"\ud83d\udc8e Analyze","text":""},{"location":"tools/analyze/#overview","title":"Overview","text":"

The analyze tool combines advanced static code analysis with LLM capabilities to provide a comprehensive analysis of the PR code changes.

The tool scans the PR code changes, finds the code components (methods, functions, classes) that changed, and enables you to interactively generate tests, docs, and code suggestions, and to run a similar code search, for each component.

It can be invoked manually by commenting on any PR:

/analyze\n

"},{"location":"tools/analyze/#example-usage","title":"Example usage","text":"

An example result:

Notes

  • Languages that are currently supported: Python, Java, C++, JavaScript, TypeScript, C#.
"},{"location":"tools/ask/","title":"Ask","text":""},{"location":"tools/ask/#overview","title":"Overview","text":"

The ask tool answers questions about the PR, based on the PR code changes. Make sure to be specific and clear in your questions. It can be invoked manually by commenting on any PR:

/ask \"...\"\n

"},{"location":"tools/ask/#example-usage","title":"Example usage","text":""},{"location":"tools/ask/#ask-lines","title":"Ask lines","text":"

You can run /ask on specific lines of code in the PR from the PR's diff view. The tool will answer questions based on the code changes in the selected lines:

  • Click on the '+' sign next to the line number to select the line.
  • To select multiple lines, click on the '+' sign of the first line and then hold and drag to select the rest of the lines.
  • Write /ask \"...\" in the comment box and press the Add single comment button.

Note that the tool does not have \"memory\" of previous questions, and answers each question independently.

"},{"location":"tools/ask/#ask-on-images","title":"Ask on images","text":"

You can also ask questions about images that appear in the comment, where the entire PR code will be used as context. The basic syntax is:

/ask \"...\"\n\n[Image](https://real_link_to_image)\n
where https://real_link_to_image is the direct link to the image.

Note that GitHub has a built-in mechanism for pasting images in comments. However, a pasted image does not provide a direct link. To get a direct link to an image, we recommend using the following scheme:

1) First, post a comment that contains only the image:

2) Quote reply to that comment:

3) In the screen that opens, type the question below the image:

4) Post the comment, and receive the answer:

See a full video tutorial here

"},{"location":"tools/ci_feedback/","title":"\ud83d\udc8e CI Feedback","text":""},{"location":"tools/ci_feedback/#overview","title":"Overview","text":"

The CI feedback tool (/checks) automatically triggers when a PR has a failed check. The tool analyzes the failed checks and provides several feedbacks:

  • Failed stage
  • Failed test name
  • Failure summary
  • Relevant error logs
"},{"location":"tools/ci_feedback/#example-usage","title":"Example usage","text":"


In addition to being automatically triggered, the tool can also be invoked manually by commenting on a PR:

/checks \"https://github.com/{repo_name}/actions/runs/{run_number}/job/{job_number}\"\n
where {repo_name} is the name of the repository, {run_number} is the run number of the failed check, and {job_number} is the job number of the failed check.

"},{"location":"tools/ci_feedback/#disabling-the-tool-from-running-automatically","title":"Disabling the tool from running automatically","text":"

If you wish to disable the tool from running automatically, you can do so by adding the following configuration to the configuration file:

[checks]\nenable_auto_checks_feedback = false\n

"},{"location":"tools/ci_feedback/#configuration-options","title":"Configuration options","text":"
  • enable_auto_checks_feedback - if set to true, the tool will automatically provide feedback when a check fails. Default is true.
  • excluded_checks_list - a list of checks to exclude from the feedback, for example: [\"check1\", \"check2\"]. Default is an empty list.
  • persistent_comment - if set to true, the tool will overwrite a previous checks comment with the new feedback. Default is true.
  • enable_help_text - if set to true, the tool will provide a help message when a user comments \"/checks\" on a PR. Default is true.
  • final_update_message - if persistent_comment is true and updating a previous checks message, the tool will also create a new message: \"Persistent checks updated to latest commit\". Default is true.
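A sketch combining the options above in a configuration file (the values shown are illustrative):

[checks]\nenable_auto_checks_feedback = true\nexcluded_checks_list = [\"check1\", \"check2\"]\npersistent_comment = true\nenable_help_text = true\nfinal_update_message = true\n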
"},{"location":"tools/custom_labels/","title":"\ud83d\udc8e Custom Labels","text":""},{"location":"tools/custom_labels/#overview","title":"Overview","text":"

The generate_labels tool scans the PR code changes, and given a list of labels and their descriptions, it automatically suggests labels that match the PR code changes.

It can be invoked manually by commenting on any PR:

/generate_labels\n

"},{"location":"tools/custom_labels/#example-usage","title":"Example usage","text":"

If we wish to detect changes to SQL queries in a given PR, we can add the following custom label along with its description:

When running the generate_labels tool on a PR that includes changes in SQL queries, it will automatically suggest the custom label:

Note that in addition to the dedicated tool generate_labels, the custom labels will also be used by the describe tool.

"},{"location":"tools/custom_labels/#how-to-enable-custom-labels","title":"How to enable custom labels","text":"

There are 3 ways to enable custom labels:

"},{"location":"tools/custom_labels/#1-cli-local-configuration-file","title":"1. CLI (local configuration file)","text":"

When working from CLI, you need to apply the configuration changes to the custom_labels file:

"},{"location":"tools/custom_labels/#2-repo-configuration-file","title":"2. Repo configuration file","text":"

To enable custom labels, you need to apply the configuration changes to the local .pr_agent.toml file in your repository.

"},{"location":"tools/custom_labels/#3-handle-custom-labels-from-the-repos-labels-page","title":"3. Handle custom labels from the Repo's labels page \ud83d\udc8e","text":"

This feature is available only in Qodo Merge Pro

  • GitHub : https://github.com/{owner}/{repo}/labels, or click on the \"Labels\" tab in the issues or PRs page.
  • GitLab : https://gitlab.com/{owner}/{repo}/-/labels, or click on \"Manage\" -> \"Labels\" on the left menu.

Add/edit the custom labels. They should be formatted as follows:

  • Label name: The name of the custom label.
  • Description: Start the description with the prefix pr_agent:, for example: pr_agent: Description of when AI should suggest this label. The description should be comprehensive and detailed, indicating when to add the desired label.

Now the custom labels will be included in the generate_labels tool.

This feature is supported in GitHub and GitLab.

"},{"location":"tools/custom_labels/#configuration-options","title":"Configuration options","text":"
  • Set enable_custom_labels to true: this will turn off the default labels and enable the custom labels provided in the custom_labels.toml file.
  • Add the custom labels. It should be formatted as follows:
[config]\nenable_custom_labels=true\n\n[custom_labels.\"Custom Label Name\"]\ndescription = \"Description of when AI should suggest this label\"\n\n[custom_labels.\"Custom Label 2\"]\ndescription = \"Description of when AI should suggest this label 2\"\n
"},{"location":"tools/custom_prompt/","title":"\ud83d\udc8e Custom Prompt","text":""},{"location":"tools/custom_prompt/#overview","title":"Overview","text":"

The custom_prompt tool scans the PR code changes, and automatically generates suggestions for improving the PR code. It shares similarities with the improve tool, but with one main difference: the custom_prompt tool will only propose suggestions that follow specific guidelines defined by the prompt in the pr_custom_prompt.prompt configuration.

The tool can be triggered automatically every time a new PR is opened, or can be invoked manually by commenting on a PR.

When commenting, use the following template:

/custom_prompt --pr_custom_prompt.prompt=\"\nThe code suggestions should focus only on the following:\n- ...\n- ...\n\n\"\n

With a configuration file, use the following template:

[pr_custom_prompt]\nprompt=\"\"\"\\\nThe suggestions should focus only on the following:\n-...\n-...\n\n\"\"\"\n

Remember - with this tool, you are the prompter. Be specific, clear, and concise in the instructions. Specify relevant aspects that you want the model to focus on. You might benefit from several trial-and-error iterations until you get the right prompt for your use case.

"},{"location":"tools/custom_prompt/#example-usage","title":"Example usage","text":"

Here is an example of a possible prompt, defined in the configuration file:

[pr_custom_prompt]\nprompt=\"\"\"\\\nThe code suggestions should focus only on the following:\n- look for edge cases when implementing a new function\n- make sure every variable has a meaningful name\n- make sure the code is efficient\n\"\"\"\n

(The instructions above are just an example. We want to emphasize that the prompt should be specific and clear, and be tailored to the needs of your project)

Results obtained with the prompt above:

"},{"location":"tools/custom_prompt/#configuration-options","title":"Configuration options","text":"

prompt: the prompt for the tool. It should be a multi-line string.

num_code_suggestions: number of code suggestions provided by the 'custom_prompt' tool. Default is 4.

enable_help_text: if set to true, the tool will display a help text in the comment. Default is true.
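A sketch combining the options above (assuming all three keys belong to the [pr_custom_prompt] section used in the examples above):

[pr_custom_prompt]\nnum_code_suggestions = 4\nenable_help_text = true\nprompt = \"\"\"\\\nThe code suggestions should focus only on the following:\n- ...\n\"\"\"\n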

"},{"location":"tools/describe/","title":"Describe","text":""},{"location":"tools/describe/#overview","title":"Overview","text":"

The describe tool scans the PR code changes, and generates a description for the PR - title, type, summary, walkthrough and labels.

The tool can be triggered automatically every time a new PR is opened, or it can be invoked manually by commenting on any PR:

/describe\n

"},{"location":"tools/describe/#example-usage","title":"Example usage","text":""},{"location":"tools/describe/#manual-triggering","title":"Manual triggering","text":"

Invoke the tool manually by commenting /describe on any PR:

After ~30 seconds, the tool will generate a description for the PR:

If you want to edit configurations, add the relevant ones to the command:

/describe --pr_description.some_config1=... --pr_description.some_config2=...\n

"},{"location":"tools/describe/#automatic-triggering","title":"Automatic triggering","text":"

To run the describe automatically when a PR is opened, define in a configuration file:

[github_app]\npr_commands = [\n    \"/describe\",\n    ...\n]\n\n[pr_description]\npublish_labels = true\n...\n

  • The pr_commands lists commands that will be executed automatically when a PR is opened.
  • The [pr_description] section contains the configurations for the describe tool you want to edit (if any).
"},{"location":"tools/describe/#configuration-options","title":"Configuration options","text":"

Possible configurations

  • publish_labels - If set to true, the tool will publish labels to the PR. Default is false.
  • publish_description_as_comment - If set to true, the tool will publish the description as a comment to the PR. If false, it will overwrite the original description. Default is false.
  • publish_description_as_comment_persistent - If set to true and `publish_description_as_comment` is true, the tool will publish the description as a persistent comment to the PR. Default is true.
  • add_original_user_description - If set to true, the tool will add the original user description to the generated description. Default is true.
  • generate_ai_title - If set to true, the tool will also generate an AI title for the PR. Default is false.
  • extra_instructions - Optional extra instructions to the tool. For example: \"focus on the changes in the file X. Ignore change in ...\"
  • enable_pr_type - If set to false, it will not show the `PR type` as a text value in the description content. Default is true.
  • final_update_message - If set to true, it will add a comment message [`PR Description updated to latest commit...`](https://github.com/Codium-ai/pr-agent/pull/499#issuecomment-1837412176) after finishing calling `/describe`. Default is false.
  • enable_semantic_files_types - If set to true, the \"Changes walkthrough\" section will be generated. Default is true.
  • collapsible_file_list - If set to true, the file list in the \"Changes walkthrough\" section will be collapsible. If set to \"adaptive\", the file list will be collapsible only if there are more than 8 files. Default is \"adaptive\".
  • enable_large_pr_handling - Pro feature. If set to true, in case of a large PR the tool will make several calls to the AI and combine them to be able to cover more files. Default is true.
  • enable_help_text - If set to true, the tool will display a help text in the comment. Default is false.
"},{"location":"tools/describe/#inline-file-summary","title":"Inline file summary \ud83d\udc8e","text":"

This feature enables you to copy the changes walkthrough table to the \"Files changed\" tab, so you can quickly understand the changes in each file while reviewing the code changes (diff view).

To copy the changes walkthrough table to the \"Files changed\" tab, you can click on the checkbox that appears in the PR Description status message, below the main PR Description:

If you prefer to have the file summaries appear in the \"Files changed\" tab on every PR, change the pr_description.inline_file_summary parameter in the configuration file, possible values are:

  • 'table': File changes walkthrough table will be displayed on the top of the \"Files changed\" tab, in addition to the \"Conversation\" tab.

  • true: A collapsible file comment with changes title and a changes summary for each file in the PR.

  • false (default): File changes walkthrough will be added only to the \"Conversation\" tab.

Note that this feature is currently available only for GitHub.
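For example, to always display the file changes walkthrough table in the \"Files changed\" tab (a minimal sketch):

[pr_description]\ninline_file_summary = 'table'\n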

"},{"location":"tools/describe/#markers-template","title":"Markers template","text":"

To enable markers, set pr_description.use_description_markers=true. Markers enable you to easily integrate user content and auto-generated content, with a template-like mechanism.

For example, if the PR original description was:

User content...\n\n## PR Type:\npr_agent:type\n\n## PR Description:\npr_agent:summary\n\n## PR Walkthrough:\npr_agent:walkthrough\n
The marker pr_agent:type will be replaced with the PR type, pr_agent:summary will be replaced with the PR summary, and pr_agent:walkthrough will be replaced with the PR walkthrough.


Configuration params:

  • use_description_markers: if set to true, the tool will use the markers template. It replaces every marker of the form pr_agent:marker_name with the relevant content. Default is false.
  • include_generated_by_header: if set to true, the tool will add a dedicated header: 'Generated by PR Agent at ...' to any automatic content. Default is true.
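A minimal configuration sketch for enabling markers, using the parameters above:

[pr_description]\nuse_description_markers = true\ninclude_generated_by_header = true\n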
"},{"location":"tools/describe/#custom-labels","title":"Custom labels","text":"

The default labels of the describe tool are quite generic, since they are meant to be used in any repo: [Bug fix, Tests, Enhancement, Documentation, Other].

You can define custom labels that are relevant for your repo and use cases. Custom labels can be defined in a configuration file, or directly in the repo's labels page.

Make sure to provide a proper title, and a detailed, well-phrased description for each label, so the tool will know when to suggest it. Each label description should be a conditional statement, that indicates if to add the label to the PR or not, according to the PR content.

"},{"location":"tools/describe/#handle-custom-labels-from-a-configuration-file","title":"Handle custom labels from a configuration file","text":"

Example for a custom labels configuration setup in a configuration file:

[config]\nenable_custom_labels=true\n\n\n[custom_labels.\"sql_changes\"]\ndescription = \"Use when a PR contains changes to SQL queries\"\n\n[custom_labels.\"test\"]\ndescription = \"use when a PR primarily contains new tests\"\n\n...\n

"},{"location":"tools/describe/#handle-custom-labels-from-the-repos-labels-page","title":"Handle custom labels from the Repo's labels page \ud83d\udc8e","text":"

You can also control the custom labels that will be suggested by the describe tool from the repo's labels page:

  • GitHub : go to https://github.com/{owner}/{repo}/labels (or click on the \"Labels\" tab in the issues or PRs page)
  • GitLab : go to https://gitlab.com/{owner}/{repo}/-/labels (or click on \"Manage\" -> \"Labels\" on the left menu)

Now add/edit the custom labels. They should be formatted as follows:

  • Label name: The name of the custom label.
  • Description: Start the description with the prefix pr_agent:, for example: pr_agent: Description of when AI should suggest this label.

Examples for custom labels:

  • Main topic:performance - pr_agent:The main topic of this PR is performance
  • New endpoint - pr_agent:A new endpoint was added in this PR
  • SQL query - pr_agent:A new SQL query was added in this PR
  • Dockerfile changes - pr_agent:The PR contains changes in the Dockerfile
  • ...

The description should be comprehensive and detailed, indicating when to add the desired label. For example:

"},{"location":"tools/describe/#usage-tips","title":"Usage Tips","text":"

Automation

  • When you first install the Qodo Merge app, the default mode for the describe tool is:
    pr_commands = [\"/describe\", ...]\n
    meaning the describe tool will run automatically on every PR, with the default configurations.
  • Markers are an alternative way to control the generated description, to give maximal control to the user. If you set:

    pr_commands = [\"/describe --pr_description.use_description_markers=true\", ...]\n
    the tool will replace every marker of the form pr_agent:marker_name in the PR description with the relevant content, where marker_name is one of the following: * type: the PR type. * summary: the PR summary. * walkthrough: the PR walkthrough.

  • Note that when markers are enabled, if the original PR description does not contain any markers, the tool will not alter the description at all.

"},{"location":"tools/documentation/","title":"\ud83d\udc8e Documentation","text":""},{"location":"tools/documentation/#overview","title":"Overview","text":"

The add_docs tool scans the PR code changes, and automatically suggests documentation for any code components that changed in the PR (functions, classes, etc.).

It can be invoked manually by commenting on any PR:

/add_docs\n

"},{"location":"tools/documentation/#example-usage","title":"Example usage","text":"

Invoke the tool manually by commenting /add_docs on any PR:

The tool will generate documentation for all the components that changed in the PR:

You can state a name of a specific component in the PR to get documentation only for that component:

/add_docs component_name\n

"},{"location":"tools/documentation/#configuration-options","title":"Configuration options","text":"
  • docs_style: The exact style of the documentation (for Python docstrings). You can choose between: google, numpy, sphinx, restructuredtext, plain. Default is sphinx.
  • extra_instructions: Optional extra instructions to the tool. For example: \"focus on the changes in the file X. Ignore change in ...\".
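A sketch combining the options above (assuming they belong to a [pr_add_docs] section; verify the section name against configuration.toml):

[pr_add_docs]\ndocs_style = \"google\"\nextra_instructions = \"Focus on the changes in the file X\"\n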

Notes

  • Languages that are currently fully supported: Python, Java, C++, JavaScript, TypeScript, C#.
  • This tool can also be triggered interactively by using the analyze tool.
"},{"location":"tools/help/","title":"Help","text":""},{"location":"tools/help/#overview","title":"Overview","text":"

The help tool provides a list of all the available tools and their descriptions. For Qodo Merge Pro users, it also enables triggering each tool by checking the relevant box.

It can be invoked manually by commenting on any PR:

/help\n

"},{"location":"tools/help/#example-usage","title":"Example usage","text":"

An example result:


"},{"location":"tools/improve/","title":"Improve","text":""},{"location":"tools/improve/#overview","title":"Overview","text":"

The improve tool scans the PR code changes, and automatically generates meaningful suggestions for improving the PR code. The tool can be triggered automatically every time a new PR is opened, or it can be invoked manually by commenting on any PR:

/improve\n

Note that the Apply this suggestion checkbox, which interactively converts a suggestion into a commitable code comment, is available only for Qodo Merge Pro \ud83d\udc8e users.

"},{"location":"tools/improve/#example-usage","title":"Example usage","text":""},{"location":"tools/improve/#manual-triggering","title":"Manual triggering","text":"

Invoke the tool manually by commenting /improve on any PR. The code suggestions by default are presented as a single comment:

To edit configurations related to the improve tool, use the following template:

/improve --pr_code_suggestions.some_config1=... --pr_code_suggestions.some_config2=...\n

For example, you can choose to present all the suggestions as commitable code comments, by running the following command:

/improve --pr_code_suggestions.commitable_code_suggestions=true\n

As can be seen, a single table comment has a significantly smaller PR footprint. We recommend this mode for most cases. Also note that collapsible comments are not supported in Bitbucket; hence, in Bitbucket the suggestions can only be presented as code comments.

"},{"location":"tools/improve/#automatic-triggering","title":"Automatic triggering","text":"

To run the improve automatically when a PR is opened, define in a configuration file:

[github_app]\npr_commands = [\n    \"/improve\",\n    ...\n]\n\n[pr_code_suggestions]\nnum_code_suggestions_per_chunk = ...\n...\n

  • The pr_commands lists commands that will be executed automatically when a PR is opened.
  • The [pr_code_suggestions] section contains the configurations for the improve tool you want to edit (if any)
"},{"location":"tools/improve/#assessing-impact","title":"Assessing Impact \ud83d\udc8e","text":"

Note that Qodo Merge Pro tracks two types of implementations:

  • Direct implementation - when the user directly applies the suggestion by clicking the Apply checkbox.
  • Indirect implementation - when the user implements the suggestion in their IDE environment. In this case, Qodo Merge will utilize, after each commit, a dedicated logic to identify if a suggestion was implemented, and will mark it as implemented.

In post-process, Qodo Merge counts the number of suggestions that were implemented, and provides general statistics and insights about the suggestions' impact on the PR process.

"},{"location":"tools/improve/#usage-tips","title":"Usage Tips","text":""},{"location":"tools/improve/#implementing-the-proposed-code-suggestions","title":"Implementing the proposed code suggestions","text":"

Each generated suggestion consists of three key elements:

  1. A single-line summary of the proposed change
  2. An expandable section containing a comprehensive description of the suggestion
  3. A diff snippet showing the recommended code modification (before and after)

We advise users to apply critical analysis and judgment when implementing the proposed suggestions. In addition to mistakes (which may happen, but are rare), sometimes the presented code modification may serve more as an illustrative example than a directly applicable solution. In such cases, we recommend prioritizing the suggestion's detailed description, using the diff snippet primarily as a supporting reference.

"},{"location":"tools/improve/#dual-publishing-mode","title":"Dual publishing mode","text":"

Our recommended approach for presenting code suggestions is through a table (--pr_code_suggestions.commitable_code_suggestions=false). This method significantly reduces the PR footprint and allows for quick and easy digestion of multiple suggestions.

We also offer a complementary dual publishing mode. When enabled, suggestions exceeding a certain score threshold are not only displayed in the table, but also presented as commitable PR comments. This mode helps highlight suggestions deemed more critical.

To activate dual publishing mode, use the following setting:

[pr_code_suggestions]\ndual_publishing_score_threshold = x\n

Where x represents the minimum score threshold (>=) for suggestions to be presented as commitable PR comments in addition to the table. Default is -1 (disabled).

"},{"location":"tools/improve/#self-review","title":"Self-review","text":"

If you set in a configuration file:

[pr_code_suggestions]\ndemand_code_suggestions_self_review = true\n

The improve tool will add a checkbox below the suggestions, prompting the user to acknowledge that they have reviewed the suggestions. You can set the content of the checkbox text via:

[pr_code_suggestions]\ncode_suggestions_self_review_text = \"... (your text here) ...\"\n

Tip - Reducing visual footprint after self-review \ud83d\udc8e

The configuration parameter pr_code_suggestions.fold_suggestions_on_self_review (default is True) can be used to automatically fold the suggestions after the user clicks the self-review checkbox.

This reduces the visual footprint of the suggestions, and also indicates to the PR reviewer that the suggestions have been reviewed by the PR author, and don't require further attention.

Tip - Demanding self-review from the PR author \ud83d\udc8e

By setting:

[pr_code_suggestions]\napprove_pr_on_self_review = true\n
the tool can automatically add an approval when the PR author clicks the self-review checkbox.

  • If you set the number of required reviewers for a PR to 2, this effectively means that the PR author must click the self-review checkbox before the PR can be merged (in addition to a human reviewer).

  • If you keep the number of required reviewers for a PR to 1 and enable this configuration, this effectively means that the PR author can approve the PR by actively clicking the self-review checkbox.

    To prevent unauthorized approvals, this configuration defaults to false and cannot be altered through online comments; enabling it requires a direct update to the configuration file and a commit to the repository. This ensures that using the feature requires a deliberate, documented decision by the repository owner.

"},{"location":"tools/improve/#how-many-code-suggestions-are-generated","title":"How many code suggestions are generated?","text":"

Qodo Merge uses a dynamic strategy to generate code suggestions based on the size of the pull request (PR). Here's how it works:

1) Chunking large PRs:

  • Qodo Merge divides large PRs into 'chunks'.
  • Each chunk contains up to pr_code_suggestions.max_context_tokens tokens (default: 14,000).

2) Generating suggestions:

  • For each chunk, Qodo Merge generates up to pr_code_suggestions.num_code_suggestions_per_chunk suggestions (default: 4).

This approach has two main benefits:

  • Scalability: The number of suggestions scales with the PR size, rather than being fixed.
  • Quality: By processing smaller chunks, the AI can maintain higher quality suggestions, as larger contexts tend to decrease AI performance.

Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 lines of code), Qodo Merge will be able to process the entire code in a single call.
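Both parameters can be adjusted in a configuration file (the values below are the documented defaults):

[pr_code_suggestions]\nmax_context_tokens = 14000\nnum_code_suggestions_per_chunk = 4\n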

"},{"location":"tools/improve/#extra-instructions-and-best-practices","title":"'Extra instructions' and 'best practices'","text":""},{"location":"tools/improve/#extra-instructions","title":"Extra instructions","text":"

Platforms supported: GitHub, GitLab, Bitbucket

You can use the extra_instructions configuration option to give the AI model additional instructions for the improve tool. Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter. Specify relevant aspects that you want the model to focus on.

Examples for possible instructions:

[pr_code_suggestions]\nextra_instructions=\"\"\"\\\n(1) Answer in Japanese\n(2) Don't suggest to add try-except block\n(3) Ignore changes in toml files\n...\n\"\"\"\n
Use triple quotes to write multi-line instructions. Use bullet points or numbers to make the instructions more readable.

"},{"location":"tools/improve/#best-practices","title":"Best practices \ud83d\udc8e","text":"

Platforms supported: GitHub, GitLab

Another option to give additional guidance to the AI model is by creating a dedicated wiki page called best_practices.md. This page can contain a list of best practices, coding standards, and guidelines that are specific to your repo/organization.

The AI model will use this wiki page as a reference, and in case the PR code violates any of the guidelines, it will suggest improvements accordingly, with a dedicated label: Organization best practice.

Example for a best_practices.md content can be found here (adapted from Google's pyguide). This file is only an example. Since it is used as a prompt for an AI model, we want to emphasize the following:

  • It should be written in a clear and concise manner
  • If needed, it should give short relevant code snippets as examples
  • We recommend limiting the text to 800 lines or fewer. Here\u2019s why:

    1) Extremely long best practices documents may not be fully processed by the AI model.

    2) A lengthy file probably represents a more \"generic\" set of guidelines, which the AI model is already familiar with. The objective is to focus on a more targeted set of guidelines tailored to the specific needs of this project.

"},{"location":"tools/improve/#local-and-global-best-practices","title":"Local and global best practices","text":"

By default, Qodo Merge will look for a local best_practices.md wiki file in the root of the relevant local repo.

If you want to enable also a global best_practices.md wiki file, set first in the global configuration file:

[best_practices]\nenable_global_best_practices = true\n

Then, create a best_practices.md wiki file in the root of the global configuration repository, pr-agent-settings.

"},{"location":"tools/improve/#example-results","title":"Example results","text":""},{"location":"tools/improve/#how-to-combine-extra-instructions-and-best-practices","title":"How to combine extra instructions and best practices","text":"

The extra instructions configuration is more related to the improve tool prompt. It can be used, for example, to avoid specific suggestions (\"Don't suggest to add try-except block\", \"Ignore changes in toml files\", ...) or to emphasize specific aspects or formats (\"Answer in Japanese\", \"Give only short suggestions\", ...)

In contrast, the best_practices.md file is a general guideline for the way code should be written in the repo.

Using a combination of both can help the AI model to provide relevant and tailored suggestions.

"},{"location":"tools/improve/#configuration-options","title":"Configuration options","text":"General options

  • extra_instructions - Optional extra instructions to the tool. For example: \"focus on the changes in the file X. Ignore change in ...\".
  • commitable_code_suggestions - If set to true, the tool will display the suggestions as commitable code comments. Default is false.
  • dual_publishing_score_threshold - Minimum score threshold for suggestions to be presented as commitable PR comments in addition to the table. Default is -1 (disabled).
  • persistent_comment - If set to true, the improve comment will be persistent, meaning that every new improve request will edit the previous one. Default is false.
  • self_reflect_on_suggestions - If set to true, the improve tool will calculate an importance score for each suggestion [1-10], and sort the suggestion labels group based on this score. Default is true.
  • suggestions_score_threshold - Any suggestion with an importance score less than this threshold will be removed. Default is 0. We highly recommend not setting this value above 7-8, since above that it may clip relevant suggestions that can be useful.
  • apply_suggestions_checkbox - Enable the checkbox to create a committable suggestion. Default is true.
  • enable_help_text - If set to true, the tool will display a help text in the comment. Default is true.
  • enable_chat_text - If set to true, the tool will display a reference to the PR chat in the comment. Default is true.

Params for number of suggestions and AI calls

  • auto_extended_mode - Enable chunking the PR code and running the tool on each chunk. Default is true.
  • num_code_suggestions_per_chunk - Number of code suggestions provided by the 'improve' tool, per chunk. Default is 4.
  • max_number_of_calls - Maximum number of chunks. Default is 3.
  • rank_extended_suggestions - If set to true, the tool will rank the suggestions, based on importance. Default is true.
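A sketch combining several of the options above in a configuration file (the values shown are illustrative):

[pr_code_suggestions]\ncommitable_code_suggestions = false\ndual_publishing_score_threshold = -1\npersistent_comment = false\nsuggestions_score_threshold = 0\nauto_extended_mode = true\nnum_code_suggestions_per_chunk = 4\nmax_number_of_calls = 3\n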

"},{"location":"tools/improve/#a-note-on-code-suggestions-quality","title":"A note on code suggestions quality","text":"
  • AI models for code are getting better and better (Sonnet-3.5 and GPT-4), but they are not flawless. Not all the suggestions will be perfect, and a user should not accept all of them automatically. Critical reading and judgment are required.
  • While AI mistakes are rare, they can happen; a real benefit of the suggestions from the improve (and review) tool is catching, with high probability, mistakes or bugs made by the PR author when they happen. So, it's a good practice to spend the needed ~30-60 seconds reviewing the suggestions, even if not all of them are always relevant.
  • The hierarchical structure of the suggestions is designed to help the user to quickly understand them, and to decide which ones are relevant and which are not:

    • Only if the Category header is relevant should the user move to the summarized suggestion description
    • Only if the summarized suggestion description is relevant should the user click on the collapsible to read the full suggestion description with a code preview example.
  • In addition, we recommend using the extra_instructions field to guide the model to suggestions that are more relevant to the specific needs of the project.

  • The interactive PR chat also provides an easy way to get more tailored suggestions and feedback from the AI model.
"},{"location":"tools/improve_component/","title":"\ud83d\udc8e Improve Component","text":""},{"location":"tools/improve_component/#overview","title":"Overview","text":"

The improve_component tool generates code suggestions for a specific code component that changed in the PR. It can be invoked manually by commenting on any PR:

/improve_component component_name\n

To get a list of the components that changed in the PR and choose the relevant component interactively, use the analyze tool.

"},{"location":"tools/improve_component/#example-usage","title":"Example usage","text":"

Invoke the tool manually by commenting /improve_component on any PR:

The tool will generate code suggestions for the selected component (if no component is stated, it will generate code suggestions for the largest component):

Notes - Languages currently supported by the tool: Python, Java, C++, JavaScript, TypeScript, C#. - This tool can also be triggered interactively by using the analyze tool.

"},{"location":"tools/improve_component/#configuration-options","title":"Configuration options","text":"
  • num_code_suggestions: number of code suggestions to provide. Default is 4
  • extra_instructions: Optional extra instructions to the tool. For example: \"focus on ...\".
  • file: in case there are several components with the same name, you can specify the relevant file.
  • class_name: in case there are several methods with the same name in the same file, you can specify the relevant class name.
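
For example, assuming the tool's options live under a pr_improve_component configuration section (check the configuration reference for the exact section name), they can be overridden directly from the PR comment:

/improve_component COMPONENT_NAME --pr_improve_component.num_code_suggestions=3 --pr_improve_component.file=FILE_NAME\n
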
"},{"location":"tools/review/","title":"Review","text":""},{"location":"tools/review/#overview","title":"Overview","text":"

The review tool scans the PR code changes, and generates a list of feedback items about the PR, aiming to aid the review process. The tool can be triggered automatically every time a new PR is opened, or can be invoked manually by commenting on any PR:

/review\n

Note that the main purpose of the review tool is to provide the PR reviewer with useful feedback and insights. The PR author, in contrast, may prefer to save time and focus on the output of the improve tool, which provides actionable code suggestions.

(Read more about the different personas in the PR process and how Qodo Merge aims to assist them in our blog)

"},{"location":"tools/review/#example-usage","title":"Example usage","text":""},{"location":"tools/review/#manual-triggering","title":"Manual triggering","text":"

Invoke the tool manually by commenting /review on any PR:

After ~30 seconds, the tool will generate a review for the PR:

If you want to edit configurations, add the relevant ones to the command:

/review --pr_reviewer.some_config1=... --pr_reviewer.some_config2=...\n

"},{"location":"tools/review/#automatic-triggering","title":"Automatic triggering","text":"

To run the review automatically when a PR is opened, define in a configuration file:

[github_app]\npr_commands = [\n    \"/review\",\n    ...\n]\n\n[pr_reviewer]\nnum_code_suggestions = ...\n...\n

  • The pr_commands lists commands that will be executed automatically when a PR is opened.
  • The [pr_reviewer] section contains the configurations for the review tool you want to edit (if any).
"},{"location":"tools/review/#configuration-options","title":"Configuration options","text":"

General options

  • num_code_suggestions Number of code suggestions provided by the 'review' tool. Default is 0, meaning no code suggestions will be provided by the `review` tool.
  • inline_code_comments If set to true, the tool will publish the code suggestions as comments on the code diff. Default is false. Note that you need to set `num_code_suggestions`>0 to get code suggestions.
  • persistent_comment If set to true, the review comment will be persistent, meaning that every new review request will edit the previous one. Default is true.
  • extra_instructions Optional extra instructions to the tool. For example: \"focus on the changes in the file X. Ignore change in ...\".
  • enable_help_text If set to true, the tool will display a help text in the comment. Default is true.

Enable\\disable specific sub-sections

  • require_score_review If set to true, the tool will add a section that scores the PR. Default is false.
  • require_tests_review If set to true, the tool will add a section that checks if the PR contains tests. Default is true.
  • require_estimate_effort_to_review If set to true, the tool will add a section that estimates the effort needed to review the PR. Default is true.
  • require_can_be_split_review If set to true, the tool will add a section that checks if the PR contains several themes, and can be split into smaller PRs. Default is false.
  • require_security_review If set to true, the tool will add a section that checks if the PR contains a possible security or vulnerability issue. Default is true.
  • require_ticket_analysis_review If set to true, and the PR contains a GitHub ticket number, the tool will add a section that checks if the PR in fact fulfilled the ticket requirements. Default is true.
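
For example, a minimal sketch that enables the PR score section and disables the effort estimation section (illustrative values, not recommendations):

[pr_reviewer]\nrequire_score_review=true\nrequire_estimate_effort_to_review=false\n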

Adding PR labels

You can enable\\disable the review tool to add specific labels to the PR:

  • enable_review_labels_security If set to true, the tool will publish a 'possible security issue' label if it detects a security issue. Default is true.
  • enable_review_labels_effort If set to true, the tool will publish a 'Review effort [1-5]: x' label. Default is true.

Auto-approval

If enabled, the review tool can approve a PR when a specific comment, /review auto_approve, is invoked.

enable_auto_approval If set to true, the tool will approve the PR when invoked with the 'auto_approve' command. Default is false. This flag can be changed only from a configuration file. maximal_review_effort Maximal effort level for auto-approval. If the PR's estimated review effort is above this threshold, the auto-approval will not run. Default is 5."},{"location":"tools/review/#usage-tips","title":"Usage Tips","text":"

General guidelines

The review tool provides a collection of configurable feedback options about a PR. It is recommended to review the Configuration options section, and choose the relevant options for your use case.

Some of the features that are disabled by default are quite useful, and should be considered for enabling. For example: require_score_review, and more.

On the other hand, if you find one of the enabled features to be irrelevant for your use case, disable it. No default configuration can fit all use cases.

Automation

When you first install Qodo Merge app, the default mode for the review tool is:

pr_commands = [\"/review --pr_reviewer.num_code_suggestions=0\", ...]\n
Meaning the review tool will run automatically on every PR, without providing code suggestions. Edit this field to enable/disable the tool, or to change the configurations used.

Possible labels from the review tool

The review tool can auto-generate two specific types of labels for a PR:

  • a possible security issue label that detects if a possible security issue exists in the PR code (enable_review_labels_security flag)
  • a Review effort [1-5]: x label, where x is the estimated effort to review the PR (enable_review_labels_effort flag)

Both labels are useful, and we recommend enabling them.
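
For example, if the effort label is not needed in your workflow, it can be disabled in the configuration file (illustrative):

[pr_reviewer]\nenable_review_labels_effort=false\n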

Extra instructions

Extra instructions are important. The review tool can be configured with extra instructions, which can be used to guide the model to feedback tailored to the needs of your project.

Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter. Specify the relevant sub-tool, and the relevant aspects of the PR that you want to emphasize.

Examples of extra instructions:

[pr_reviewer]\nextra_instructions=\"\"\"\\\nIn the code feedback section, emphasize the following:\n- Does the code logic cover relevant edge cases?\n- Is the code logic clear and easy to understand?\n- Is the code logic efficient?\n...\n\"\"\"\n
Use triple quotes to write multi-line instructions. Use bullet points to make the instructions more readable.

Auto-approval

Qodo Merge can approve a PR when a specific comment is invoked.

To ensure safety, the auto-approval feature is disabled by default. To enable auto-approval, you need to actively set the following in a pre-defined configuration file:

[pr_reviewer]\nenable_auto_approval = true\n
(this specific flag cannot be set with a command line argument, only in the configuration file, committed to the repository)

After enabling, by commenting on a PR:

/review auto_approve\n
Qodo Merge will automatically approve the PR, and add a comment with the approval.

You can also enable auto-approval only if the PR meets certain requirements, such as the estimated_review_effort label being equal to or below a certain threshold, by adjusting the flag:

[pr_reviewer]\nmaximal_review_effort = 5\n

"},{"location":"tools/similar_code/","title":"\ud83d\udc8e Similar Code","text":""},{"location":"tools/similar_code/#overview","title":"Overview","text":"

The similar code tool retrieves the most similar code components from inside the organization's codebase, or from open-source code.

For example:

Global Search for a method called chat_completion:

Qodo Merge will examine the code component and will extract the most relevant keywords to search for similar code:

  • extracted keywords: the keywords that were extracted from the code by Qodo Merge. The link will open a search page with the extracted keywords, to allow the user to modify the search if needed.
  • search context: the context in which the search will be performed, the organization's codebase or open-source code (Global).
  • similar code: the most similar code components found. The link will open the code component in the relevant file.
  • relevant repositories: the open-source repositories that are relevant to the searched code component and its keywords.

Search result link example:

Organization Search:

"},{"location":"tools/similar_code/#how-to-use","title":"How to use","text":""},{"location":"tools/similar_code/#manually","title":"Manually","text":"

To invoke the similar code tool manually, comment on the PR:

/find_similar_component COMPONENT_NAME\n
Where COMPONENT_NAME should be the name of a code component in the PR (class, method, function).

If there is a name ambiguity, there are two configurations that will help the tool to find the correct component:

  • --pr_find_similar_component.file: in case there are several components with the same name, you can specify the relevant file.
  • --pr_find_similar_component.class_name: in case there are several methods with the same name in the same file, you can specify the relevant class name.

Example:

/find_similar_component COMPONENT_NAME --pr_find_similar_component.file=FILE_NAME\n

"},{"location":"tools/similar_code/#automatically-via-analyze-table","title":"Automatically (via Analyze table)","text":"

It can also be invoked automatically from the analyze table, which can be accessed by:

/analyze\n
Choose the components you want to find similar code for, and click on the similar checkbox.

If you are looking to search for similar code in the organization's codebase, you can click on the Organization checkbox, and it will invoke a new search command just for the organization's codebase.

"},{"location":"tools/similar_code/#configuration-options","title":"Configuration options","text":"
  • search_from_org: if set to true, the tool will search for similar code in the organization's codebase. Default is false.
  • number_of_keywords: number of keywords to use for the search. Default is 5.
  • number_of_results: the maximum number of results to present. Default is 5.
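
For example, these options can be overridden when invoking the tool, using the pr_find_similar_component prefix shown above (illustrative values):

/find_similar_component COMPONENT_NAME --pr_find_similar_component.search_from_org=true --pr_find_similar_component.number_of_results=3\n
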
"},{"location":"tools/similar_issues/","title":"Similar Issues","text":""},{"location":"tools/similar_issues/#overview","title":"Overview","text":"

The similar issue tool retrieves the most similar issues to the current issue. It can be invoked manually by commenting on any PR:

/similar_issue\n

"},{"location":"tools/similar_issues/#example-usage","title":"Example usage","text":"

Note that to perform retrieval, the similar_issue tool indexes all of the repo's previous issues (once).

Select the VectorDB by changing the pr_similar_issue parameter in the configuration.toml file.

Two VectorDBs are available to switch between: 1. LanceDB 2. Pinecone
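
For example, a minimal sketch of selecting LanceDB (assuming the parameter under pr_similar_issue is named vectordb; check configuration.toml for the exact key):

[pr_similar_issue]\nvectordb = \"lancedb\"\n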

To enable usage of the 'similar issue' tool for Pinecone, you need to set the following keys in .secrets.toml (or in the relevant environment variables):

[pinecone]\napi_key = \"...\"\nenvironment = \"...\"\n
These parameters can be obtained by registering with Pinecone.

"},{"location":"tools/similar_issues/#how-to-use","title":"How to use","text":"
  • To invoke the 'similar issue' tool from CLI, run: python3 cli.py --issue_url=... similar_issue

  • To invoke the 'similar issue' tool via online usage, comment on a PR: /similar_issue

  • You can also enable the 'similar issue' tool to run automatically when a new issue is opened, by adding it to the pr_commands list in the github_app section
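
For example, following the github_app pr_commands pattern shown in the usage guide (illustrative):

[github_app]\npr_commands = [\n    \"/similar_issue\",\n    ...\n]\n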

"},{"location":"tools/test/","title":"\ud83d\udc8e Test","text":""},{"location":"tools/test/#overview","title":"Overview","text":"

By combining LLM abilities with static code analysis, the test tool generates tests for a selected component, based on the PR code changes. It can be invoked manually by commenting on any PR:

/test component_name\n
where 'component_name' is the name of a specific component in the PR. To get a list of the components that changed in the PR and choose the relevant component interactively, use the analyze tool.

"},{"location":"tools/test/#example-usage","title":"Example usage","text":"

Invoke the tool manually by commenting /test on any PR: The tool will generate tests for the selected component (if no component is stated, it will generate tests for the largest component):

(Example taken from here):

Notes - Languages currently supported by the tool: Python, Java, C++, JavaScript, TypeScript, C#. - This tool can also be triggered interactively by using the analyze tool.

"},{"location":"tools/test/#configuration-options","title":"Configuration options","text":"
  • num_tests: number of tests to generate. Default is 3.
  • testing_framework: the testing framework to use. If not set, for Python it will use pytest, for Java it will use JUnit, for C++ it will use Catch2, and for JavaScript and TypeScript it will use jest.
  • avoid_mocks: if set to true, the tool will try to avoid using mocks in the generated tests. Note that even if this option is set to true, the tool might still use mocks if it cannot generate a test without them. Default is true.
  • extra_instructions: Optional extra instructions to the tool. For example: \"use the following mock injection scheme: ...\".
  • file: in case there are several components with the same name, you can specify the relevant file.
  • class_name: in case there are several methods with the same name in the same file, you can specify the relevant class name.
  • enable_help_text: if set to true, the tool will add a help text to the PR comment. Default is true.
"},{"location":"tools/update_changelog/","title":"Update Changelog","text":""},{"location":"tools/update_changelog/#overview","title":"Overview","text":"

The update_changelog tool automatically updates the CHANGELOG.md file with the PR changes. It can be invoked manually by commenting on any PR:

/update_changelog\n

"},{"location":"tools/update_changelog/#example-usage","title":"Example usage","text":""},{"location":"tools/update_changelog/#configuration-options","title":"Configuration options","text":"

Under the section pr_update_changelog, the configuration file contains options to customize the 'update changelog' tool:

  • push_changelog_changes: whether to push the changes to CHANGELOG.md, or just print them. Default is false (print only).
  • extra_instructions: Optional extra instructions to the tool. For example: \"focus on the changes in the file X. Ignore change in ...\"
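
For example, to have the tool push the changelog update instead of only printing it (illustrative):

[pr_update_changelog]\npush_changelog_changes = true\n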
"},{"location":"usage-guide/","title":"Usage guide","text":"

This page provides a detailed guide on how to use Qodo Merge. It includes information on how to adjust Qodo Merge configurations, define which tools will run automatically, and other advanced configurations.

  • Introduction
  • Configuration File
  • Usage and Automation
    • Local Repo (CLI)
    • Online Usage
    • GitHub App
    • GitHub Action
    • GitLab Webhook
    • BitBucket App
    • Azure DevOps Provider
  • Managing Mail Notifications
  • Changing a Model
  • Additional Configurations Walkthrough
    • Ignoring files from analysis
    • Extra instructions
    • Working with large PRs
    • Changing a model
    • Patch Extra Lines
    • Editing the prompts
  • Qodo Merge Pro Models
"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/","title":"EXAMPLE BEST PRACTICE","text":""},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#recommend-python-best-practices","title":"Recommend Python Best Practices","text":"

This document outlines a series of recommended best practices for Python development. These guidelines aim to improve code quality, maintainability, and readability.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#imports","title":"Imports","text":"

Use import statements for packages and modules only, not for individual types, classes, or functions.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#definition","title":"Definition","text":"

Reusability mechanism for sharing code from one module to another.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#decision","title":"Decision","text":"
  • Use import x for importing packages and modules.
  • Use from x import y where x is the package prefix and y is the module name with no prefix.
  • Use from x import y as z in any of the following circumstances:
    • Two modules named y are to be imported.
    • y conflicts with a top-level name defined in the current module.
    • y conflicts with a common parameter name that is part of the public API (e.g., features).
    • y is an inconveniently long name, or too generic in the context of your code
  • Use import y as z only when z is a standard abbreviation (e.g., import numpy as np).

For example the module sound.effects.echo may be imported as follows:

from sound.effects import echo\n...\necho.EchoFilter(input, output, delay=0.7, atten=4)\n

Do not use relative names in imports. Even if the module is in the same package, use the full package name. This helps prevent unintentionally importing a package twice.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#exemptions","title":"Exemptions","text":"

Exemptions from this rule:

  • Symbols from the following modules are used to support static analysis and type checking:
    • typing module
    • collections.abc module
    • typing_extensions module
  • Redirects from the six.moves module.
"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#packages","title":"Packages","text":"

Import each module using the full pathname location of the module.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#decision_1","title":"Decision","text":"

All new code should import each module by its full package name.

Imports should be as follows:

Yes:\n  # Reference absl.flags in code with the complete name (verbose).\n  import absl.flags\n  from doctor.who import jodie\n\n  _FOO = absl.flags.DEFINE_string(...)\n
Yes:\n  # Reference flags in code with just the module name (common).\n  from absl import flags\n  from doctor.who import jodie\n\n  _FOO = flags.DEFINE_string(...)\n

(assume this file lives in doctor/who/ where jodie.py also exists)

No:\n  # Unclear what module the author wanted and what will be imported.  The actual\n  # import behavior depends on external factors controlling sys.path.\n  # Which possible jodie module did the author intend to import?\n  import jodie\n

The directory the main binary is located in should not be assumed to be in sys.path despite that happening in some environments. This being the case, code should assume that import jodie refers to a third-party or top-level package named jodie, not a local jodie.py.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#default-iterators-and-operators","title":"Default Iterators and Operators","text":"

Use default iterators and operators for types that support them, like lists, dictionaries, and files.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#definition_1","title":"Definition","text":"

Container types, like dictionaries and lists, define default iterators and membership test operators (\u201cin\u201d and \u201cnot in\u201d).

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#decision_2","title":"Decision","text":"

Use default iterators and operators for types that support them, like lists, dictionaries, and files. The built-in types define iterator methods, too. Prefer these methods to methods that return lists, except that you should not mutate a container while iterating over it.

Yes:  for key in adict: ...\n      if obj in alist: ...\n      for line in afile: ...\n      for k, v in adict.items(): ...\n
No:   for key in adict.keys(): ...\n      for line in afile.readlines(): ...\n
"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#lambda-functions","title":"Lambda Functions","text":"

Okay for one-liners. Prefer generator expressions over map() or filter() with a lambda.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#decision_3","title":"Decision","text":"

Lambdas are allowed. If the code inside the lambda function spans multiple lines or is longer than 60-80 chars, it might be better to define it as a regular nested function.

For common operations like multiplication, use the functions from the operator module instead of lambda functions. For example, prefer operator.mul to lambda x, y: x * y.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#default-argument-values","title":"Default Argument Values","text":"

Okay in most cases.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#definition_2","title":"Definition","text":"

You can specify values for variables at the end of a function\u2019s parameter list, e.g., def foo(a, b=0):. If foo is called with only one argument, b is set to 0. If it is called with two arguments, b has the value of the second argument.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#decision_4","title":"Decision","text":"

Okay to use with the following caveat:

Do not use mutable objects as default values in the function or method definition.

Yes: def foo(a, b=None):\n         if b is None:\n             b = []\nYes: def foo(a, b: Sequence | None = None):\n         if b is None:\n             b = []\nYes: def foo(a, b: Sequence = ()):  # Empty tuple OK since tuples are immutable.\n         ...\n
from absl import flags\n_FOO = flags.DEFINE_string(...)\n\nNo:  def foo(a, b=[]):\n         ...\nNo:  def foo(a, b=time.time()):  # Is `b` supposed to represent when this module was loaded?\n         ...\nNo:  def foo(a, b=_FOO.value):  # sys.argv has not yet been parsed...\n         ...\nNo:  def foo(a, b: Mapping = {}):  # Could still get passed to unchecked code.\n         ...\n
"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#truefalse-evaluations","title":"True/False Evaluations","text":"

Use the \u201cimplicit\u201d false if possible, e.g., if foo: rather than if foo != []:

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#lexical-scoping","title":"Lexical Scoping","text":"

Okay to use.

An example of the use of this feature is:

def get_adder(summand1: float) -> Callable[[float], float]:\n    \"\"\"Returns a function that adds numbers to a given number.\"\"\"\n    def adder(summand2: float) -> float:\n        return summand1 + summand2\n\n    return adder\n
"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#decision_5","title":"Decision","text":"

Okay to use.

"},{"location":"usage-guide/EXAMPLE_BEST_PRACTICE/#threading","title":"Threading","text":"

Do not rely on the atomicity of built-in types.

While Python\u2019s built-in data types such as dictionaries appear to have atomic operations, there are corner cases where they aren\u2019t atomic (e.g. if __hash__ or __eq__ are implemented as Python methods) and their atomicity should not be relied upon. Neither should you rely on atomic variable assignment (since this in turn depends on dictionaries).

Use the queue module\u2019s Queue data type as the preferred way to communicate data between threads. Otherwise, use the threading module and its locking primitives. Prefer condition variables and threading.Condition instead of using lower-level locks.

"},{"location":"usage-guide/PR_agent_pro_models/","title":"PR agent pro models","text":""},{"location":"usage-guide/PR_agent_pro_models/#qodo-merge-pro-models","title":"Qodo Merge Pro Models","text":"

The default models used by Qodo Merge Pro are a combination of Claude-3.5-sonnet and OpenAI's GPT-4 models.

Users can configure Qodo Merge Pro to use solely a specific model by editing the configuration file.

For example, to restrict Qodo Merge Pro to using only Claude-3.5-sonnet, add this setting:

[config]\nmodel=\"claude-3-5-sonnet\"\n

Or to restrict Qodo Merge Pro to using only GPT-4o, add this setting:

[config]\nmodel=\"gpt-4o\"\n

"},{"location":"usage-guide/additional_configurations/","title":"Additional Configurations","text":""},{"location":"usage-guide/additional_configurations/#show-possible-configurations","title":"Show possible configurations","text":"

The possible configurations of Qodo Merge are stored here. In the tools page you can find explanations on how to use these configurations for each tool.

To print all the available configurations as a comment on your PR, you can use the following command:

/config\n

To view the actual configurations used for a specific tool, after all the user settings are applied, you can add for each tool a --config.output_relevant_configurations=true suffix. For example:

/improve --config.output_relevant_configurations=true\n
Will output an additional field showing the actual configurations used for the improve tool.

"},{"location":"usage-guide/additional_configurations/#ignoring-files-from-analysis","title":"Ignoring files from analysis","text":"

In some cases, you may want to exclude specific files or directories from the analysis performed by Qodo Merge. This can be useful, for example, when you have files that are generated automatically or files that shouldn't be reviewed, like vendor code.

You can ignore files or folders using the following methods: - IGNORE.GLOB - IGNORE.REGEX

which you can edit to ignore files or folders based on glob or regex patterns.

"},{"location":"usage-guide/additional_configurations/#example-usage","title":"Example usage","text":"

Let's look at an example where we want to ignore all files with .py extension from the analysis.

To ignore Python files in a PR with online usage, comment on a PR: /review --ignore.glob=\"['*.py']\"

To ignore Python files in all PRs using glob pattern, set in a configuration file:

[ignore]\nglob = ['*.py']\n

And to ignore Python files in all PRs using regex pattern, set in a configuration file:

[ignore]\nregex = ['.*\\.py$']\n

"},{"location":"usage-guide/additional_configurations/#extra-instructions","title":"Extra instructions","text":"

All Qodo Merge tools have a parameter called extra_instructions, that enables adding free-text extra instructions. Example usage:

/update_changelog --pr_update_changelog.extra_instructions=\"Make sure to update also the version ...\"\n

"},{"location":"usage-guide/additional_configurations/#working-with-large-prs","title":"Working with large PRs","text":"

The default mode of CodiumAI is to have a single call per tool, using GPT-4, which has a token limit of 8000 tokens. This mode provides a very good speed-quality-cost tradeoff, and can handle most PRs successfully. When the PR is above the token limit, it employs a PR Compression strategy.

However, for very large PRs, or in case you want to emphasize quality over speed and cost, there are two possible solutions: 1) Use a model with larger context, like GPT-32K, or claude-100K. This solution will be applicable for all the tools. 2) For the /improve tool, there is an 'extended' mode (/improve --extended), which divides the PR into chunks, and processes each chunk separately. With this mode, regardless of the model, no compression will be done (but for large PRs, multiple model calls may occur)

"},{"location":"usage-guide/additional_configurations/#patch-extra-lines","title":"Patch Extra Lines","text":"

By default, around any change in your PR, git patch provides three lines of context above and below the change.

@@ -12,5 +12,5 @@ def func1():\n code line that already existed in the file...\n code line that already existed in the file...\n code line that already existed in the file....\n-code line that was removed in the PR\n+new code line added in the PR\n code line that already existed in the file...\n code line that already existed in the file...\n code line that already existed in the file...\n

Qodo Merge will try to increase the number of lines of context, via the following parameters:

[config]\npatch_extra_lines_before=3\npatch_extra_lines_after=1\n

Increasing this number provides more context to the model, but will also increase the token budget, and may overwhelm the model with too much information, unrelated to the actual PR code changes.

If the PR is too large (see PR Compression strategy), Qodo Merge may automatically set this number to 0, and will use the original git patch.

"},{"location":"usage-guide/additional_configurations/#editing-the-prompts","title":"Editing the prompts","text":"

The prompts for the various Qodo Merge tools are defined in the pr_agent/settings folder. In practice, the prompts are loaded and stored as a standard setting object. Hence, editing them is similar to editing any other configuration value - just place the relevant key in the .pr_agent.toml file, and override the default value.

For example, if you want to edit the prompts of the describe tool, you can add the following to your .pr_agent.toml file:

[pr_description_prompt]\nsystem=\"\"\"\n...\n\"\"\"\nuser=\"\"\"\n...\n\"\"\"\n
Note that the new prompt will need to generate an output compatible with the relevant post-process function.

"},{"location":"usage-guide/additional_configurations/#integrating-with-logging-observability-platforms","title":"Integrating with Logging Observability Platforms","text":"

Various logging observability tools can be used out-of-the box when using the default LiteLLM AI Handler. Simply configure the LiteLLM callback settings in configuration.toml and set environment variables according to the LiteLLM documentation.

For example, to use LangSmith you can add the following to your configuration.toml file:

[litellm]\nenable_callbacks = true\nsuccess_callback = [\"langsmith\"]\nfailure_callback = [\"langsmith\"]\nservice_callback = []\n

Then set the following environment variables:

LANGSMITH_API_KEY=<api_key>\nLANGSMITH_PROJECT=<project>\nLANGSMITH_BASE_URL=<url>\n
"},{"location":"usage-guide/additional_configurations/#ignoring-automatic-commands-in-prs","title":"Ignoring automatic commands in PRs","text":"

In some cases, you may want to automatically ignore specific PRs. Qodo Merge enables you to ignore PRs with a specific title, or from/to specific branches (regex matching).

To ignore PRs with a specific title such as \"[Bump]: ...\", you can add the following to your configuration.toml file:

[config]\nignore_pr_title = [\"\\\\[Bump\\\\]\"]\n

Where the ignore_pr_title is a list of regex patterns to match the PR title you want to ignore. Default is ignore_pr_title = [\"^\\\\[Auto\\\\]\", \"^Auto\"].

To ignore PRs from specific source or target branches, you can add the following to your configuration.toml file:

[config]\nignore_pr_source_branches = ['develop', 'main', 'master', 'stage']\nignore_pr_target_branches = [\"qa\"]\n

Where the ignore_pr_source_branches and ignore_pr_target_branches are lists of regex patterns to match the source and target branches you want to ignore. They are not mutually exclusive, you can use them together or separately.

"},{"location":"usage-guide/automations_and_usage/","title":"Usage and Automation","text":""},{"location":"usage-guide/automations_and_usage/#local-repo-cli","title":"Local repo (CLI)","text":"

When running from your locally cloned Qodo Merge repo (CLI), your local configuration file will be used. Examples of invoking the different tools via the CLI:

  • Review: python -m pr_agent.cli --pr_url=<pr_url> review
  • Describe: python -m pr_agent.cli --pr_url=<pr_url> describe
  • Improve: python -m pr_agent.cli --pr_url=<pr_url> improve
  • Ask: python -m pr_agent.cli --pr_url=<pr_url> ask \"Write me a poem about this PR\"
  • Reflect: python -m pr_agent.cli --pr_url=<pr_url> reflect
  • Update Changelog: python -m pr_agent.cli --pr_url=<pr_url> update_changelog

<pr_url> is the url of the relevant PR (for example: #50).

Notes:

(1) In addition to editing your local configuration file, you can also change any configuration value by adding it to the command line:

python -m pr_agent.cli --pr_url=<pr_url>  /review --pr_reviewer.extra_instructions=\"focus on the file: ...\"\n

(2) You can print results locally, without publishing them, by setting in configuration.toml:

[config]\npublish_output=false\nverbosity_level=2\n
This is useful for debugging or experimenting with different tools.

(3)

git provider: The git_provider field in a configuration file determines the GIT provider that will be used by Qodo Merge. Currently, the following providers are supported: \"github\", \"gitlab\", \"bitbucket\", \"azure\", \"codecommit\", \"local\", \"gerrit\"

Default is \"github\".
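
For example, to run against a GitLab repository, set in the configuration file (illustrative):

[config]\ngit_provider=\"gitlab\"\n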

"},{"location":"usage-guide/automations_and_usage/#online-usage","title":"Online usage","text":"

Online usage means invoking Qodo Merge tools by comments on a PR. Commands for invoking the different tools via comments:

  • Review: /review
  • Describe: /describe
  • Improve: /improve (or /improve_code for bitbucket, since /improve is sometimes reserved)
  • Ask: /ask \"...\"
  • Reflect: /reflect
  • Update Changelog: /update_changelog

To edit a specific configuration value, just add --<config_path>=<value> to any command, where <config_path> is the relevant section and key. For example, if you want to edit the review tool configurations, you can run:

/review --pr_reviewer.extra_instructions=\"...\" --pr_reviewer.require_score_review=false\n
Any configuration value in the configuration file can be similarly edited. Comment /config to see the list of available configurations.

"},{"location":"usage-guide/automations_and_usage/#github-app","title":"GitHub App","text":"

Configurations for Qodo Merge Pro

Qodo Merge Pro for GitHub is an App, hosted by CodiumAI, so all the instructions below are also relevant for Qodo Merge Pro users. The same goes for the GitLab webhook and BitBucket App sections.

"},{"location":"usage-guide/automations_and_usage/#github-app-automatic-tools-when-a-new-pr-is-opened","title":"GitHub app automatic tools when a new PR is opened","text":"

The github_app section defines GitHub app specific configurations.

The configuration parameter pr_commands defines the list of tools that will be run automatically when a new PR is opened.

[github_app]\npr_commands = [\n    \"/describe --pr_description.final_update_message=false\",\n    \"/review --pr_reviewer.num_code_suggestions=0\",\n    \"/improve\",\n]\n
This means that when a new PR is opened/reopened or marked as ready for review, Qodo Merge will run the describe, review and improve tools. For the review tool, for example, the num_code_suggestions parameter will be set to 0.

You can override the default tool parameters by using one of the three options for a configuration file: wiki, local, or global. For example, if your local .pr_agent.toml file contains:

[pr_description]\ngenerate_ai_title = true\n
Every time you run the describe tool, including automatic runs, the PR title will be generated by the AI.

To cancel the automatic run of all the tools, set:

[github_app]\npr_commands = []\n

"},{"location":"usage-guide/automations_and_usage/#github-app-automatic-tools-for-push-actions-commits-to-an-open-pr","title":"GitHub app automatic tools for push actions (commits to an open PR)","text":"

In addition to running automatic tools when a PR is opened, the GitHub app can also respond to new code that is pushed to an open PR.

The configuration toggle handle_push_trigger can be used to enable this feature. The configuration parameter push_commands defines the list of tools that will be run automatically when new code is pushed to the PR.

[github_app]\nhandle_push_trigger = true\npush_commands = [\n    \"/describe\",\n    \"/review  --pr_reviewer.num_code_suggestions=0 --pr_reviewer.final_update_message=false\",\n]\n
This means that when new code is pushed to the PR, Qodo Merge will run the describe and review tools, with the specified parameters.

"},{"location":"usage-guide/automations_and_usage/#github-action","title":"GitHub Action","text":"

GitHub Action is a different way to trigger Qodo Merge tools, and uses a different configuration mechanism than GitHub App. You can configure settings for GitHub Action by adding environment variables under the env section in .github/workflows/pr_agent.yml file. Specifically, start by setting the following environment variables:

      env:\n        OPENAI_KEY: ${{ secrets.OPENAI_KEY }} # Make sure to add your OpenAI key to your repo secrets\n        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Make sure to add your GitHub token to your repo secrets\n        github_action_config.auto_review: \"true\" # enable\\disable auto review\n        github_action_config.auto_describe: \"true\" # enable\\disable auto describe\n        github_action_config.auto_improve: \"true\" # enable\\disable auto improve\n        github_action_config.pr_actions: [\"opened\", \"reopened\", \"ready_for_review\", \"review_requested\"]\n
github_action_config.auto_review, github_action_config.auto_describe and github_action_config.auto_improve are used to enable/disable automatic tools that run when a new PR is opened. If not set, the default configuration is for all three tools to run automatically when a new PR is opened.

github_action_config.pr_actions is used to configure which pull_request events will trigger the enabled auto flags. If not set, the default configuration is [\"opened\", \"reopened\", \"ready_for_review\", \"review_requested\"]

github_action_config.enable_output is used to enable/disable the GitHub Actions output parameter (default is true). The review result is output as JSON to the steps.{step-id}.outputs.review property. The JSON structure is equivalent to the yaml data structure defined in pr_reviewer_prompts.toml.

Note that you can pass additional configuration parameters by adding environment variables to .github/workflows/pr_agent.yml, or by using a .pr_agent.toml configuration file in the root of your repo.

For example, you can set an environment variable: pr_description.publish_labels=false, or add a .pr_agent.toml file with the following content:

[pr_description]\npublish_labels = false\n
to prevent Qodo Merge from publishing labels when running the describe tool.

"},{"location":"usage-guide/automations_and_usage/#gitlab-webhook","title":"GitLab Webhook","text":"

After setting up a GitLab webhook, to control which commands will run automatically when a new MR is opened, you can set the pr_commands parameter in the configuration file, similar to the GitHub App:

[gitlab]\npr_commands = [\n    \"/describe\",\n    \"/review --pr_reviewer.num_code_suggestions=0\",\n    \"/improve\",\n]\n

The GitLab webhook can also respond to new code that is pushed to an open MR. The configuration toggle handle_push_trigger can be used to enable this feature. The configuration parameter push_commands defines the list of tools that will be run automatically when new code is pushed to the MR.

[gitlab]\nhandle_push_trigger = true\npush_commands = [\n    \"/describe\",\n    \"/review  --pr_reviewer.num_code_suggestions=0 --pr_reviewer.final_update_message=false\",\n]\n

Note that to use the 'handle_push_trigger' feature, you also need to give the GitLab webhook the \"Push events\" scope.

"},{"location":"usage-guide/automations_and_usage/#bitbucket-app","title":"BitBucket App","text":"

Similar to the GitHub app, when running Qodo Merge from the BitBucket App, the default configuration file from a pre-built Docker image will be initially loaded.

By uploading a local .pr_agent.toml file to the root of the repo's main branch, you can edit and customize any configuration parameter. Note that you need to upload .pr_agent.toml prior to creating a PR, in order for the configuration to take effect.

For example, if your local .pr_agent.toml file contains:

[pr_reviewer]\nextra_instructions = \"Answer in japanese\"\n

Each time you invoke a /review tool, it will use the extra instructions you set in the local configuration file.

Note that among other limitations, BitBucket provides relatively low rate-limits for applications (up to 1000 requests per hour), and does not provide an API to track the actual rate-limit usage. If you experience a lack of responses from Qodo Merge, you might want to set: bitbucket_app.avoid_full_files=true in your configuration file. This will prevent Qodo Merge from acquiring the full file content, and will only use the diff content. This will reduce the number of requests made to BitBucket, at the cost of a small decrease in accuracy, as dynamic context will not be applicable.
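
In configuration-file form, this setting is simply (based on the key named above):

[bitbucket_app]\navoid_full_files = true\n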

"},{"location":"usage-guide/automations_and_usage/#bitbucket-self-hosted-app-automatic-tools","title":"BitBucket Self-Hosted App automatic tools","text":"

To control which commands will run automatically when a new PR is opened, you can set the pr_commands parameter in the configuration file. Specifically, set the following values:

[bitbucket_app]\npr_commands = [\n    \"/review --pr_reviewer.num_code_suggestions=0\",\n    \"/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7\",\n]\n
Note that specifically for Bitbucket, we recommend using --pr_code_suggestions.suggestions_score_threshold=7, and this is the default value we set for Bitbucket. Since this platform only supports inline code suggestions, we want to limit the number of suggestions, and present only a limited number.

"},{"location":"usage-guide/automations_and_usage/#azure-devops-provider","title":"Azure DevOps provider","text":"

To use the Azure DevOps provider, use the following settings in configuration.toml:

[config]\ngit_provider=\"azure\"\n

The Azure DevOps provider supports PAT token or DefaultAzureCredential authentication. A PAT is faster to create, but has a built-in expiration date, and will use the user's identity for API calls. Using DefaultAzureCredential, you can use a managed identity or a service principal, which are more secure and will create a separate ADO user identity (via AAD) for the agent.

If PAT was chosen, you can assign the value in .secrets.toml. If DefaultAzureCredential was chosen, you can assign the additional env vars like AZURE_CLIENT_SECRET directly, or use a managed identity/az cli (for local development) without any additional configuration. In any case, the 'org' value must be assigned in .secrets.toml:

[azure_devops]\norg = \"https://dev.azure.com/YOUR_ORGANIZATION/\"\n# pat = \"YOUR_PAT_TOKEN\" needed only if using PAT for authentication\n

"},{"location":"usage-guide/automations_and_usage/#azure-devops-webhook","title":"Azure DevOps Webhook","text":"

To control which commands will run automatically when a new PR is opened, you can set the pr_commands parameter in the configuration file, similar to the GitHub App:

[azure_devops_server]\npr_commands = [\n    \"/describe\",\n    \"/review --pr_reviewer.num_code_suggestions=0\",\n    \"/improve\",\n]\n

"},{"location":"usage-guide/changing_a_model/","title":"Changing a Model","text":""},{"location":"usage-guide/changing_a_model/#changing-a-model","title":"Changing a model","text":"

See here for a list of available models. To use a different model than the default (GPT-4), you need to edit the following fields in the configuration file:

[config]\nmodel = \"...\"\nmodel_turbo = \"...\"\nfallback_models = [\"...\"]\n

For models and environments not from OpenAI, you might need to provide additional keys and other parameters. You can give parameters via a configuration file (see below for instructions), or from environment variables. See litellm documentation for the environment variables relevant per model.

"},{"location":"usage-guide/changing_a_model/#azure","title":"Azure","text":"

To use Azure, set in your .secrets.toml (working from CLI), or in the GitHub Settings > Secrets and variables (working from GitHub App or GitHub Action):

[openai]\nkey = \"\" # your azure api key\napi_type = \"azure\"\napi_version = '2023-05-15'  # Check Azure documentation for the current API version\napi_base = \"\"  # The base URL for your Azure OpenAI resource. e.g. \"https://<your resource name>.openai.azure.com\"\ndeployment_id = \"\"  # The deployment name you chose when you deployed the engine\n

and set in your configuration file:

[config]\nmodel=\"\" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)\nmodel_turbo=\"\" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)\nfallback_models=[\"...\"] # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)\n

"},{"location":"usage-guide/changing_a_model/#hugging-face","title":"Hugging Face","text":"

Local: You can run Hugging Face models locally through either VLLM or Ollama.

E.g. to use a new Hugging Face model locally via Ollama, set:

[__init__.py]\nMAX_TOKENS = {\n    \"model-name-on-ollama\": <max_tokens>\n}\ne.g.\nMAX_TOKENS={\n    ...,\n    \"ollama/llama2\": 4096\n}\n\n\n[config] # in configuration.toml\nmodel = \"ollama/llama2\"\nmodel_turbo = \"ollama/llama2\"\nfallback_models=[\"ollama/llama2\"]\n\n[ollama] # in .secrets.toml\napi_base = ... # the base url for your Hugging Face inference endpoint\n# e.g. if running Ollama locally, you may use:\napi_base = \"http://localhost:11434/\"\n

"},{"location":"usage-guide/changing_a_model/#inference-endpoints","title":"Inference Endpoints","text":"

To use a new model with Hugging Face Inference Endpoints, for example, set:

[__init__.py]\nMAX_TOKENS = {\n    \"model-name-on-huggingface\": <max_tokens>\n}\ne.g.\nMAX_TOKENS={\n    ...,\n    \"meta-llama/Llama-2-7b-chat-hf\": 4096\n}\n[config] # in configuration.toml\nmodel = \"huggingface/meta-llama/Llama-2-7b-chat-hf\"\nmodel_turbo = \"huggingface/meta-llama/Llama-2-7b-chat-hf\"\nfallback_models=[\"huggingface/meta-llama/Llama-2-7b-chat-hf\"]\n\n[huggingface] # in .secrets.toml\nkey = ... # your Hugging Face api key\napi_base = ... # the base url for your Hugging Face inference endpoint\n
(you can obtain a Llama2 key from here)

"},{"location":"usage-guide/changing_a_model/#replicate","title":"Replicate","text":"

To use Llama2 model with Replicate, for example, set:

[config] # in configuration.toml\nmodel = \"replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1\"\nmodel_turbo = \"replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1\"\nfallback_models=[\"replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1\"]\n[replicate] # in .secrets.toml\nkey = ...\n
(you can obtain a Llama2 key from here)

Also, review the AiHandler file for instructions on how to set keys for other models.

"},{"location":"usage-guide/changing_a_model/#groq","title":"Groq","text":"

To use Llama3 model with Groq, for example, set:

[config] # in configuration.toml\nmodel = \"llama3-70b-8192\"\nmodel_turbo = \"llama3-70b-8192\"\nfallback_models = [\"groq/llama3-70b-8192\"] \n[groq] # in .secrets.toml\nkey = ... # your Groq api key\n
(you can obtain a Groq key from here)

"},{"location":"usage-guide/changing_a_model/#vertex-ai","title":"Vertex AI","text":"

To use Google's Vertex AI platform and its associated models (chat-bison/codechat-bison) set:

[config] # in configuration.toml\nmodel = \"vertex_ai/codechat-bison\"\nmodel_turbo = \"vertex_ai/codechat-bison\"\nfallback_models=\"vertex_ai/codechat-bison\"\n\n[vertexai] # in .secrets.toml\nvertex_project = \"my-google-cloud-project\"\nvertex_location = \"\"\n

Your application default credentials will be used for authentication so there is no need to set explicit credentials in most environments.

If you do want to set explicit credentials, then you can use the GOOGLE_APPLICATION_CREDENTIALS environment variable set to a path to a json credentials file.

"},{"location":"usage-guide/changing_a_model/#anthropic","title":"Anthropic","text":"

To use Anthropic models, set the relevant models in the configuration section of the configuration file:

[config]\nmodel=\"anthropic/claude-3-opus-20240229\"\nmodel_turbo=\"anthropic/claude-3-opus-20240229\"\nfallback_models=[\"anthropic/claude-3-opus-20240229\"]\n

And also set the api key in the .secrets.toml file:

[anthropic]\nKEY = \"...\"\n

"},{"location":"usage-guide/changing_a_model/#amazon-bedrock","title":"Amazon Bedrock","text":"

To use Amazon Bedrock and its foundational models, add the below configuration:

[config] # in configuration.toml\nmodel=\"bedrock/anthropic.claude-3-sonnet-20240229-v1:0\"\nmodel_turbo=\"bedrock/anthropic.claude-3-sonnet-20240229-v1:0\"\nfallback_models=[\"bedrock/anthropic.claude-v2:1\"]\n

Note that you have to add access to foundational models before using them. Please refer to this document for more details.

If you are using the claude-3 model, please configure the following settings as there are parameters incompatible with claude-3.

[litellm]\ndrop_params = true\n

AWS session is automatically authenticated from your environment, but you can also explicitly set AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION_NAME environment variables. Please refer to this document for more details.

"},{"location":"usage-guide/changing_a_model/#custom-models","title":"Custom models","text":"

If the relevant model doesn't appear here, you can still use it as a custom model:

(1) Set the model name in the configuration file:

[config]\nmodel=\"custom_model_name\"\nmodel_turbo=\"custom_model_name\"\nfallback_models=[\"custom_model_name\"]\n
(2) Set the maximal tokens for the model:
[config]\ncustom_model_max_tokens= ...\n
(3) Go to litellm documentation, find the model you want to use, and set the relevant environment variables.

"},{"location":"usage-guide/configuration_options/","title":"Configuration File","text":"

The different tools and sub-tools used by Qodo Merge are adjustable via the configuration file.

In addition to general configuration options, each tool has its own configurations. For example, the review tool will use parameters from the pr_reviewer section in the configuration file. See the Tools Guide for a detailed description of the different tools and their configurations.

There are three ways to set persistent configurations:

  1. Wiki configuration page \ud83d\udc8e
  2. Local configuration file
  3. Global configuration file \ud83d\udc8e

In terms of precedence, wiki configurations will override local configurations, and local configurations will override global configurations.

Tip1: edit only what you need

Your configuration file should be minimal, and edit only the relevant values. Don't copy the entire configuration options, since it can lead to legacy problems when something changes.

Tip2: show relevant configurations

If you set config.output_relevant_configurations=true, each tool will also output in a collapsible section its relevant configurations. This can be useful for debugging, or getting to know the configurations better.
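
In configuration-file form, this is (based on the key named above):

[config]\noutput_relevant_configurations=true\n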

"},{"location":"usage-guide/configuration_options/#wiki-configuration-file","title":"Wiki configuration file \ud83d\udc8e","text":"

Platforms supported: GitHub, GitLab, Bitbucket

With Qodo Merge Pro, you can set configurations by creating a page called .pr_agent.toml in the wiki of the repo. The advantage of this method is that it allows you to set configurations without needing to commit new content to the repo - just edit the wiki page and save.

Click here to see a short instructional video. We recommend surrounding the configuration content with triple-quotes (or ```toml), to allow better presentation when displayed in the wiki as markdown. An example content:

[pr_description]\ngenerate_ai_title=true\n

Qodo Merge will know to remove the surrounding quotes when reading the configuration content.

"},{"location":"usage-guide/configuration_options/#local-configuration-file","title":"Local configuration file","text":"

Platforms supported: GitHub, GitLab, Bitbucket, Azure DevOps

By uploading a local .pr_agent.toml file to the root of the repo's main branch, you can edit and customize any configuration parameter. Note that you need to upload .pr_agent.toml prior to creating a PR, in order for the configuration to take effect.

For example, if you set in .pr_agent.toml:

[pr_reviewer]\nextra_instructions=\"\"\"\\\n- instruction a\n- instruction b\n...\n\"\"\"\n

Then you can give a list of extra instructions to the review tool.

"},{"location":"usage-guide/configuration_options/#global-configuration-file","title":"Global configuration file \ud83d\udc8e","text":"

Platforms supported: GitHub, GitLab, Bitbucket

If you create a repo called pr-agent-settings in your organization, its configuration file .pr_agent.toml will be used as a global configuration file for any other repo that belongs to the same organization. Parameters from a local .pr_agent.toml file, in a specific repo, will override the global configuration parameters.

For example, in the GitHub organization Codium-ai:

  • The file https://github.com/Codium-ai/pr-agent-settings/.pr_agent.toml serves as a global configuration file for all the repos in the GitHub organization Codium-ai.

  • The repo https://github.com/Codium-ai/pr-agent inherits the global configuration file from pr-agent-settings.

"},{"location":"usage-guide/introduction/","title":"Introduction","text":"

After installation, there are three basic ways to invoke Qodo Merge:

  1. Locally running a CLI command
  2. Online usage - by commenting on a PR
  3. Enabling Qodo Merge tools to run automatically when a new PR is opened

Specifically, CLI commands can be issued by invoking a pre-built docker image, or by invoking a locally cloned repo.

For online usage, you will need to set up either a GitHub App or a GitHub Action (GitHub), a GitLab webhook (GitLab), or a BitBucket App (BitBucket). These platforms also enable running Qodo Merge tools automatically when a new PR is opened, or on each push to a branch.

"},{"location":"usage-guide/mail_notifications/","title":"Managing Mail Notifications","text":"

Unfortunately, it is not possible in GitHub to disable mail notifications from a specific user. If you are subscribed to notifications for a repo with Qodo Merge, we recommend turning off notifications for PR comments, to avoid lengthy emails:

As an alternative, you can filter the notifications from the Qodo Merge bot in your mail provider; see how.

Another option to reduce the mail overload, yet still receive notifications on Qodo Merge tools, is to disable the help collapsible section in Qodo Merge bot comments. This can be done by setting enable_help_text=false for the relevant tool in the configuration file. For example, to disable the help text for the pr_reviewer tool, set:

[pr_reviewer]\nenable_help_text = false\n

"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 000000000..0f8724efd --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 000000000..a39daccec Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/tools/analyze/index.html b/tools/analyze/index.html new file mode 100644 index 000000000..cdbe8e2ba --- /dev/null +++ b/tools/analyze/index.html @@ -0,0 +1,2157 @@ + + + + + + + + + + + + + + + + + + + + + + + 💎 Analyze - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

💎 Analyze

Overview

The analyze tool combines advanced static code analysis with LLM capabilities to provide a comprehensive analysis of the PR code changes.

The tool scans the PR code changes, finds the code components (methods, functions, classes) that changed, and lets you interactively generate tests, docs, code suggestions, and similar code searches for each component.

It can be invoked manually by commenting on any PR:

/analyze

Example usage

An example result:

Analyze 1

Notes

  • Languages currently supported: Python, Java, C++, JavaScript, TypeScript, C#.
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tools/ask/index.html b/tools/ask/index.html new file mode 100644 index 000000000..ae536496a --- /dev/null +++ b/tools/ask/index.html @@ -0,0 +1,2216 @@ + + + + + + + + + + + + + + + + + + + + + + + Ask - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Ask

+ +

Overview

+

The ask tool answers questions about the PR, based on the PR code changes. Make sure to be specific and clear in your questions.
It can be invoked manually by commenting on any PR:
/ask "..."
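For example, a question might look like the following (the question text here is purely illustrative, not taken from a real PR):
/ask "Why was the default timeout changed in this PR, and does it affect existing callers?"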

+

Example usage

+

Ask Comment

+

Ask

+

Ask lines

+

You can run /ask on specific lines of code in the PR from the PR's diff view. The tool will answer questions based on the code changes in the selected lines.
- Click on the '+' sign next to the line number to select the line.
- To select multiple lines, click on the '+' sign of the first line, then hold and drag to select the rest of the lines.
- Write /ask "..." in the comment box and press the Add single comment button.

+

Ask Line

+

Note that the tool does not have "memory" of previous questions, and answers each question independently.

+

Ask on images

+

You can also ask questions about images that appear in the comment, where the entire PR code will be used as context.

The basic syntax is:
/ask "..."
[Image](https://real_link_to_image)

where https://real_link_to_image is the direct link to the image.

+

Note that GitHub has a built-in mechanism for pasting images in comments. However, a pasted image does not provide a direct link.
To get a direct link to an image, we recommend using the following scheme:

+

1) First, post a comment that contains only the image:

+

Ask image1

+

2) Quote reply to that comment:

+

Ask image2

+

3) In the screen opened, type the question below the image:

+

Ask image3 +Ask image4

+

4) Post the comment, and receive the answer:

+

Ask image5

+

See a full video tutorial here

💎 CI Feedback

+ +

Overview

+

The CI feedback tool (/checks) is automatically triggered when a PR has a failed check.
The tool analyzes the failed checks and provides the following feedback:

+
  • Failed stage
  • Failed test name
  • Failure summary
  • Relevant error logs

Example usage

+

Failed Check 1

+

→ +Failed Check 2

+
+

In addition to being automatically triggered, the tool can also be invoked manually by commenting on a PR:
/checks "https://github.com/{repo_name}/actions/runs/{run_number}/job/{job_number}"
where {repo_name} is the name of the repository, {run_number} is the run number of the failed check, and {job_number} is the job number of the failed check.

+

Disabling the tool from running automatically

+

If you wish to disable the tool from running automatically, you can do so by adding the following configuration to the configuration file:
[checks]
enable_auto_checks_feedback = false

+

Configuration options

+
  • enable_auto_checks_feedback - if set to true, the tool will automatically provide feedback when a check fails. Default is true.
  • excluded_checks_list - a list of checks to exclude from the feedback, for example: ["check1", "check2"]. Default is an empty list.
  • persistent_comment - if set to true, the tool will overwrite a previous checks comment with the new feedback. Default is true.
  • enable_help_text - if set to true, the tool will provide a help message when a user comments "/checks" on a PR. Default is true.
  • final_update_message - if persistent_comment is true and a previous checks comment is updated, the tool will also create a new message: "Persistent checks updated to latest commit". Default is true.
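As a sketch, several of these options can be combined in the configuration file like so (the check names and values shown are illustrative, not recommendations):
[checks]
enable_auto_checks_feedback = true
excluded_checks_list = ["lint", "spellcheck"]
persistent_comment = true
enable_help_text = false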
💎 Custom Labels

+ +

Overview

+

The generate_labels tool scans the PR code changes, and given a list of labels and their descriptions, it automatically suggests labels that match the PR code changes.

+

It can be invoked manually by commenting on any PR:
/generate_labels

+

Example usage

+

If we wish to detect changes to SQL queries in a given PR, we can add the following custom label along with its description:

+

Custom labels list

+

When running the generate_labels tool on a PR that includes changes in SQL queries, it will automatically suggest the custom label:

+

Custom labels published

+

Note that in addition to the dedicated tool generate_labels, the custom labels will also be used by the describe tool.

+

How to enable custom labels

+

There are 3 ways to enable custom labels:

+

1. CLI (local configuration file)

+

When working from CLI, you need to apply the configuration changes to the custom_labels file:

+

2. Repo configuration file

+

To enable custom labels, you need to apply the configuration changes to the local .pr_agent.toml file in your repository.

+

3. Handle custom labels from the Repo's labels page 💎

+
+

This feature is available only in Qodo Merge Pro

+
+
a. Go to the repo's labels page:
  • GitHub: https://github.com/{owner}/{repo}/labels, or click on the "Labels" tab in the issues or PRs page.
  • GitLab: https://gitlab.com/{owner}/{repo}/-/labels, or click on "Manage" -> "Labels" on the left menu.

b. Add/edit the custom labels. They should be formatted as follows:
  • Label name: The name of the custom label.
  • Description: Start the description with the prefix pr_agent:, for example: pr_agent: Description of when AI should suggest this label.
The description should be comprehensive and detailed, indicating when to add the desired label.

+

Add native custom labels

+

c. Now the custom labels will be included in the generate_labels tool.

+
+

This feature is supported in GitHub and GitLab.

+
+

Configuration options

+
  • Change enable_custom_labels to True: This will turn off the default labels and enable the custom labels provided in the custom_labels.toml file.
  • Add the custom labels. They should be formatted as follows:
[config]
enable_custom_labels=true

[custom_labels."Custom Label Name"]
description = "Description of when AI should suggest this label"

[custom_labels."Custom Label 2"]
description = "Description of when AI should suggest this label 2"
💎 Custom Prompt

+ +

Overview

+

The custom_prompt tool scans the PR code changes, and automatically generates suggestions for improving the PR code.
It shares similarities with the improve tool, but with one main difference: the custom_prompt tool will only propose suggestions that follow specific guidelines defined by the prompt in the pr_custom_prompt.prompt configuration.

+

The tool can be triggered automatically every time a new PR is opened, or can be invoked manually by commenting on a PR.

+

When commenting, use the following template:
/custom_prompt --pr_custom_prompt.prompt="
The code suggestions should focus only on the following:
- ...
- ...
"
+

With a configuration file, use the following template:
[pr_custom_prompt]
prompt="""\
The suggestions should focus only on the following:
-...
-...
"""
+

Remember - with this tool, you are the prompter. Be specific, clear, and concise in the instructions. Specify the relevant aspects that you want the model to focus on.
You might benefit from several trial-and-error iterations until you get the right prompt for your use case.

+

Example usage

+

Here is an example of a possible prompt, defined in the configuration file:
[pr_custom_prompt]
prompt="""\
The code suggestions should focus only on the following:
- look for edge cases when implementing a new function
- make sure every variable has a meaningful name
- make sure the code is efficient
"""

+

(The instructions above are just an example. We want to emphasize that the prompt should be specific and clear, and be tailored to the needs of your project)

+

Results obtained with the prompt above:

+

Custom prompt results

+

Configuration options

+

prompt: the prompt for the tool. It should be a multi-line string.

+

num_code_suggestions: number of code suggestions provided by the 'custom_prompt' tool. Default is 4.

+

enable_help_text: if set to true, the tool will display a help text in the comment. Default is true.
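For example, these options could be combined in the configuration file as follows (a sketch; the prompt text and values are illustrative only):
[pr_custom_prompt]
prompt="""\
The code suggestions should focus only on the following:
- avoid code duplication
- add input validation where user input is handled
"""
num_code_suggestions = 3
enable_help_text = false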

Describe

+ +

Overview

+

The describe tool scans the PR code changes, and generates a description for the PR - title, type, summary, walkthrough and labels.

+

The tool can be triggered automatically every time a new PR is opened, or it can be invoked manually by commenting on any PR:
/describe

+

Example usage

+

Manual triggering

+

Invoke the tool manually by commenting /describe on any PR:

+

Describe comment

+

After ~30 seconds, the tool will generate a description for the PR:

+

Describe New

+

If you want to edit configurations, add the relevant ones to the command:
/describe --pr_description.some_config1=... --pr_description.some_config2=...

+

Automatic triggering

+

To run the describe tool automatically when a PR is opened, define in a configuration file:
[github_app]
pr_commands = [
    "/describe",
    ...
]

[pr_description]
publish_labels = true
...

+
  • The pr_commands lists commands that will be executed automatically when a PR is opened.
  • The [pr_description] section contains the configurations for the describe tool you want to edit (if any).

Configuration options

+
+

Possible configurations

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
publish_labels: If set to true, the tool will publish labels to the PR. Default is false.
publish_description_as_comment: If set to true, the tool will publish the description as a comment to the PR. If false, it will overwrite the original description. Default is false.
publish_description_as_comment_persistent: If set to true and `publish_description_as_comment` is true, the tool will publish the description as a persistent comment to the PR. Default is true.
add_original_user_description: If set to true, the tool will add the original user description to the generated description. Default is true.
generate_ai_title: If set to true, the tool will also generate an AI title for the PR. Default is false.
extra_instructions: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ..."
enable_pr_type: If set to false, it will not show the `PR type` as a text value in the description content. Default is true.
final_update_message: If set to true, it will add a comment message [`PR Description updated to latest commit...`](https://github.com/Codium-ai/pr-agent/pull/499#issuecomment-1837412176) after finishing calling `/describe`. Default is false.
enable_semantic_files_types: If set to true, the "Changes walkthrough" section will be generated. Default is true.
collapsible_file_list: If set to true, the file list in the "Changes walkthrough" section will be collapsible. If set to "adaptive", the file list will be collapsible only if there are more than 8 files. Default is "adaptive".
enable_large_pr_handling: Pro feature. If set to true, in case of a large PR the tool will make several calls to the AI and combine them to be able to cover more files. Default is true.
enable_help_text: If set to true, the tool will display a help text in the comment. Default is false.
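As an illustration, a minimal sketch combining a few of the options above in the configuration file might look like this (the values are chosen only for illustration):
[pr_description]
publish_labels = true
generate_ai_title = true
collapsible_file_list = "adaptive"
enable_help_text = false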
+ +

Inline file summary 💎

+

This feature enables you to copy the changes walkthrough table to the "Files changed" tab, so you can quickly understand the changes in each file while reviewing the code changes (diff view).

+

To copy the changes walkthrough table to the "Files changed" tab, you can click on the checkbox that appears in the PR Description status message below the main PR Description:

+

Add table checkbox

+

If you prefer to have the file summaries appear in the "Files changed" tab on every PR, change the pr_description.inline_file_summary parameter in the configuration file. Possible values are:

+
  • 'table': File changes walkthrough table will be displayed on the top of the "Files changed" tab, in addition to the "Conversation" tab.

Diffview table

  • true: A collapsible file comment with changes title and a changes summary for each file in the PR.

Diffview changes

  • false (default): File changes walkthrough will be added only to the "Conversation" tab.

Note that this feature is currently available only for GitHub.
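For example, to always show the walkthrough table at the top of the "Files changed" tab, a configuration along these lines could be used (a sketch based on the values listed above):
[pr_description]
inline_file_summary = "table"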

+

Markers template

+

To enable markers, set pr_description.use_description_markers=true.
Markers make it easy to integrate the user's content and auto-generated content, with a template-like mechanism.

+

For example, if the original PR description was:
User content...

## PR Type:
pr_agent:type

## PR Description:
pr_agent:summary

## PR Walkthrough:
pr_agent:walkthrough

The marker pr_agent:type will be replaced with the PR type, pr_agent:summary will be replaced with the PR summary, and pr_agent:walkthrough will be replaced with the PR walkthrough.

+

Describe markers before

+

+

Describe markers after

+

Configuration params:

+
  • use_description_markers: if set to true, the tool will use the markers template. It replaces every marker of the form pr_agent:marker_name with the relevant content. Default is false.
  • include_generated_by_header: if set to true, the tool will add a dedicated header: 'Generated by PR Agent at ...' to any automatic content. Default is true.

Custom labels

+

The default labels of the describe tool are quite generic, since they are meant to be used in any repo: [Bug fix, Tests, Enhancement, Documentation, Other].

+

You can define custom labels that are relevant for your repo and use cases.
Custom labels can be defined in a configuration file, or directly in the repo's labels page.

Make sure to provide a proper title, and a detailed and well-phrased description for each label, so the tool will know when to suggest it.
Each label description should be a conditional statement that indicates whether to add the label to the PR, according to the PR content.

+

Handle custom labels from a configuration file

+

Example of a custom labels configuration in a configuration file:
[config]
enable_custom_labels=true

[custom_labels."sql_changes"]
description = "Use when a PR contains changes to SQL queries"

[custom_labels."test"]
description = "Use when a PR primarily contains new tests"

...

+

Handle custom labels from the Repo's labels page 💎

+

You can also control the custom labels that will be suggested by the describe tool from the repo's labels page:

+
  • GitHub: go to https://github.com/{owner}/{repo}/labels (or click on the "Labels" tab in the issues or PRs page)
  • GitLab: go to https://gitlab.com/{owner}/{repo}/-/labels (or click on "Manage" -> "Labels" on the left menu)

Now add/edit the custom labels. They should be formatted as follows:

+
  • Label name: The name of the custom label.
  • Description: Start the description with the prefix pr_agent:, for example: pr_agent: Description of when AI should suggest this label.

Examples for custom labels:

+
  • Main topic:performance - pr_agent:The main topic of this PR is performance
  • New endpoint - pr_agent:A new endpoint was added in this PR
  • SQL query - pr_agent:A new SQL query was added in this PR
  • Dockerfile changes - pr_agent:The PR contains changes in the Dockerfile
  • ...

The description should be comprehensive and detailed, indicating when to add the desired label. For example: +Add native custom labels

+

Usage Tips

+
+

Automation

+
  • When you first install the Qodo Merge app, the default mode for the describe tool is:
    pr_commands = ["/describe", ...]
    meaning the describe tool will run automatically on every PR, with the default configurations.
+
  • Markers are an alternative way to control the generated description, to give maximal control to the user. If you set:
    pr_commands = ["/describe --pr_description.use_description_markers=true", ...]
    the tool will replace every marker of the form pr_agent:marker_name in the PR description with the relevant content, where marker_name is one of the following:
    * type: the PR type.
    * summary: the PR summary.
    * walkthrough: the PR walkthrough.
  • Note that when markers are enabled, if the original PR description does not contain any markers, the tool will not alter the description at all.
💎 Documentation

+ +

Overview

+

The add_docs tool scans the PR code changes, and automatically suggests documentation for any code components that changed in the PR (functions, classes, etc.).

+

It can be invoked manually by commenting on any PR:
/add_docs

+

Example usage

+

Invoke the tool manually by commenting /add_docs on any PR:

+

Docs command

+

The tool will generate documentation for all the components that changed in the PR:

+

Docs component

+

Docs single component

+

You can state the name of a specific component in the PR to get documentation only for that component:
/add_docs component_name

+

Configuration options

+
  • docs_style: The exact style of the documentation (for Python docstrings). You can choose between: google, numpy, sphinx, restructuredtext, plain. Default is sphinx.
  • extra_instructions: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".
+

Notes

+
  • Languages currently fully supported: Python, Java, C++, JavaScript, TypeScript, C#.
  • This tool can also be triggered interactively by using the analyze tool.
Help

+ +

Overview

+

The help tool provides a list of all the available tools and their descriptions.
For Qodo Merge Pro users, it also enables triggering each tool by checking the relevant box.

+

It can be invoked manually by commenting on any PR:
/help

+

Example usage

+

An example result:

+

Help 1

+

+

Analyze 2

Improve

+ +

Overview

+

The improve tool scans the PR code changes, and automatically generates meaningful suggestions for improving the PR code.
The tool can be triggered automatically every time a new PR is opened, or it can be invoked manually by commenting on any PR:
/improve

+

code_suggestions_as_comment_closed.png

+

code_suggestions_as_comment_open.png

+

Note that the Apply this suggestion checkbox, which interactively converts a suggestion into a commitable code comment, is available only for Qodo Merge Pro 💎 users.

+

Example usage

+

Manual triggering

+

Invoke the tool manually by commenting /improve on any PR. The code suggestions by default are presented as a single comment:

+

To edit configurations related to the improve tool, use the following template:
/improve --pr_code_suggestions.some_config1=... --pr_code_suggestions.some_config2=...

+

For example, you can choose to present all the suggestions as commitable code comments, by running the following command:
/improve --pr_code_suggestions.commitable_code_suggestions=true

+

improve

+

As can be seen, a single table comment has a significantly smaller PR footprint. We recommend this mode for most cases.
Also note that collapsible sections are not supported in Bitbucket. Hence, the suggestions can only be presented in Bitbucket as code comments.

+

Automatic triggering

+

To run the improve tool automatically when a PR is opened, define in a configuration file:
[github_app]
pr_commands = [
    "/improve",
    ...
]

[pr_code_suggestions]
num_code_suggestions_per_chunk = ...
...

+
  • The pr_commands lists commands that will be executed automatically when a PR is opened.
  • The [pr_code_suggestions] section contains the configurations for the improve tool you want to edit (if any).

Assessing Impact 💎

+

Note that Qodo Merge Pro tracks two types of implementations:

+
  • Direct implementation - when the user directly applies the suggestion by clicking the Apply checkbox.
  • Indirect implementation - when the user implements the suggestion in their IDE environment. In this case, Qodo Merge will utilize, after each commit, a dedicated logic to identify if a suggestion was implemented, and will mark it as implemented.

code_suggestions_asses_impact

+

In post-process, Qodo Merge counts the number of suggestions that were implemented, and provides general statistics and insights about the suggestions' impact on the PR process.

+

code_suggestions_asses_impact_stats_1

+

code_suggestions_asses_impact_stats_2

+

Usage Tips

+

Implementing the proposed code suggestions

+

Each generated suggestion consists of three key elements:

+
  1. A single-line summary of the proposed change
  2. An expandable section containing a comprehensive description of the suggestion
  3. A diff snippet showing the recommended code modification (before and after)

We advise users to apply critical analysis and judgment when implementing the proposed suggestions.
In addition to mistakes (which may happen, but are rare), sometimes the presented code modification may serve more as an illustrative example than a directly applicable solution.
In such cases, we recommend prioritizing the suggestion's detailed description, using the diff snippet primarily as a supporting reference.

+

Dual publishing mode

+

Our recommended approach for presenting code suggestions is through a table (--pr_code_suggestions.commitable_code_suggestions=false). +This method significantly reduces the PR footprint and allows for quick and easy digestion of multiple suggestions.

+

We also offer a complementary dual publishing mode. When enabled, suggestions exceeding a certain score threshold are not only displayed in the table, but also presented as commitable PR comments. +This mode helps highlight suggestions deemed more critical.

+

To activate dual publishing mode, use the following setting:

+
[pr_code_suggestions]
dual_publishing_score_threshold = x
+

Where x represents the minimum score threshold (>=) for suggestions to be presented as commitable PR comments in addition to the table. Default is -1 (disabled).

+

Self-review

+

If you set in a configuration file:
[pr_code_suggestions]
demand_code_suggestions_self_review = true

+

The improve tool will add a checkbox below the suggestions, prompting the user to acknowledge that they have reviewed the suggestions.
You can set the content of the checkbox text via:
[pr_code_suggestions]
code_suggestions_self_review_text = "... (your text here) ..."

+

self_review_1

+
+

Tip - Reducing visual footprint after self-review 💎

+

The configuration parameter pr_code_suggestions.fold_suggestions_on_self_review (default is True) can be used to automatically fold the suggestions after the user clicks the self-review checkbox.

+

This reduces the visual footprint of the suggestions, and also indicates to the PR reviewer that the suggestions have been reviewed by the PR author, and don't require further attention.

+
+
+

Tip - Demanding self-review from the PR author 💎

+

By setting:
[pr_code_suggestions]
approve_pr_on_self_review = true
the tool can automatically add an approval when the PR author clicks the self-review checkbox.

+
  • If you set the number of required reviewers for a PR to 2, this effectively means that the PR author must click the self-review checkbox before the PR can be merged (in addition to a human reviewer).

self_review_2

+
  • If you keep the number of required reviewers for a PR at 1 and enable this configuration, this effectively means that the PR author can approve the PR by actively clicking the self-review checkbox.

    To prevent unauthorized approvals, this configuration defaults to false, and cannot be altered through online comments; enabling it requires a direct update to the configuration file and a commit to the repository. This ensures that utilizing the feature demands a deliberate, documented decision by the repository owner.
+

How many code suggestions are generated?

+

Qodo Merge uses a dynamic strategy to generate code suggestions based on the size of the pull request (PR). Here's how it works:

+

1) Chunking large PRs:

+
  • Qodo Merge divides large PRs into 'chunks'.
  • Each chunk contains up to pr_code_suggestions.max_context_tokens tokens (default: 14,000).

2) Generating suggestions:

+
  • For each chunk, Qodo Merge generates up to pr_code_suggestions.num_code_suggestions_per_chunk suggestions (default: 4).

This approach has two main benefits:

+
  • Scalability: The number of suggestions scales with the PR size, rather than being fixed.
  • Quality: By processing smaller chunks, the AI can maintain higher quality suggestions, as larger contexts tend to decrease AI performance.

Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 lines of code), Qodo Merge will be able to process the entire code in a single call.
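As a sketch, the chunking behavior described above can be tuned in the configuration file, for example (the values shown simply restate the documented defaults and are illustrative, not recommendations):
[pr_code_suggestions]
max_context_tokens = 14000
num_code_suggestions_per_chunk = 4
max_number_of_calls = 3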

+

'Extra instructions' and 'best practices'

+

Extra instructions

+
+

Platforms supported: GitHub, GitLab, Bitbucket

+
+

You can use the extra_instructions configuration option to give the AI model additional instructions for the improve tool. +Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter. Specify relevant aspects that you want the model to focus on.

+

Examples of possible instructions:
[pr_code_suggestions]
extra_instructions="""\
(1) Answer in Japanese
(2) Don't suggest to add try-except block
(3) Ignore changes in toml files
...
"""
Use triple quotes to write multi-line instructions. Use bullet points or numbers to make the instructions more readable.

+

Best practices 💎

+
+

Platforms supported: GitHub, GitLab

+
+

Another option to give additional guidance to the AI model is by creating a dedicated wiki page called best_practices.md. +This page can contain a list of best practices, coding standards, and guidelines that are specific to your repo/organization.

+

The AI model will use this wiki page as a reference, and in case the PR code violates any of the guidelines, it will suggest improvements accordingly, with a dedicated label: Organization best practice.

+

An example of best_practices.md content can be found here (adapted from Google's pyguide).
This file is only an example. Since it is used as a prompt for an AI model, we want to emphasize the following:

+
  • It should be written in a clear and concise manner
  • If needed, it should give short, relevant code snippets as examples
  • It is recommended to limit the text to 800 lines or fewer. Here's why:

    1) Extremely long best practices documents may not be fully processed by the AI model.

    2) A lengthy file probably represents a more "generic" set of guidelines, which the AI model is already familiar with. The objective is to focus on a more targeted set of guidelines tailored to the specific needs of this project.
Local and global best practices
+

By default, Qodo Merge will look for a local best_practices.md wiki file in the root of the relevant local repo.

+

If you also want to enable a global best_practices.md wiki file, first set in the global configuration file:

[best_practices]
enable_global_best_practices = true
+
+

Then, create a best_practices.md wiki file in the root of the global configuration repository, pr-agent-settings.

+
Example results
+

best_practice

+

How to combine extra instructions and best practices

+

The extra instructions configuration is more related to the improve tool prompt. It can be used, for example, to avoid specific suggestions ("Don't suggest to add try-except block", "Ignore changes in toml files", ...) or to emphasize specific aspects or formats ("Answer in Japanese", "Give only short suggestions", ...)

+

In contrast, the best_practices.md file is a general guideline for the way code should be written in the repo.

+

Using a combination of both can help the AI model to provide relevant and tailored suggestions.

+

Configuration options

+
+General options +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
extra_instructions: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".
commitable_code_suggestions: If set to true, the tool will display the suggestions as commitable code comments. Default is false.
dual_publishing_score_threshold: Minimum score threshold for suggestions to be presented as commitable PR comments in addition to the table. Default is -1 (disabled).
persistent_comment: If set to true, the improve comment will be persistent, meaning that every new improve request will edit the previous one. Default is false.
self_reflect_on_suggestions: If set to true, the improve tool will calculate an importance score for each suggestion [1-10], and sort the suggestion labels group based on this score. Default is true.
suggestions_score_threshold: Any suggestion with an importance score less than this threshold will be removed. Default is 0. It is highly recommended not to set this value above 7-8, since above that it may clip relevant suggestions that can be useful.
apply_suggestions_checkbox: Enable the checkbox to create a committable suggestion. Default is true.
enable_help_text: If set to true, the tool will display a help text in the comment. Default is true.
enable_chat_text: If set to true, the tool will display a reference to the PR chat in the comment. Default is true.

+
+
+Params for number of suggestions and AI calls +

+ + + + + + + + + + + + + + + + +
auto_extended_mode: Enable chunking the PR code and running the tool on each chunk. Default is true.
num_code_suggestions_per_chunk: Number of code suggestions provided by the 'improve' tool, per chunk. Default is 4.
max_number_of_calls: Maximum number of chunks. Default is 3.
rank_extended_suggestions: If set to true, the tool will rank the suggestions, based on importance. Default is true.
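For example, a few of the options above might be set together in the configuration file as follows (a sketch; the values shown are only illustrative):
[pr_code_suggestions]
commitable_code_suggestions = false
dual_publishing_score_threshold = 7
persistent_comment = true
suggestions_score_threshold = 4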

+
+

A note on code suggestions quality

+
  • AI models for code are getting better and better (Sonnet-3.5 and GPT-4), but they are not flawless. Not all the suggestions will be perfect, and a user should not accept all of them automatically. Critical reading and judgment are required.
  • While mistakes of the AI are rare, they can happen, and a real benefit from the suggestions of the improve (and review) tool is to catch, with high probability, mistakes or bugs made by the PR author when they happen. So, it's a good practice to spend the needed ~30-60 seconds to review the suggestions, even if not all of them are always relevant.
  • The hierarchical structure of the suggestions is designed to help the user quickly understand them, and to decide which ones are relevant and which are not:
    • Only if the Category header is relevant should the user move to the summarized suggestion description.
    • Only if the summarized suggestion description is relevant should the user click on the collapsible, to read the full suggestion description with a code preview example.
  • In addition, we recommend using the extra_instructions field to guide the model to suggestions that are more relevant to the specific needs of the project.
  • The interactive PR chat also provides an easy way to get more tailored suggestions and feedback from the AI model.
💎 Improve Component

+ +

Overview

+

The improve_component tool generates code suggestions for a specific code component that changed in the PR.
It can be invoked manually by commenting on any PR:
/improve_component component_name

+

To get a list of the components that changed in the PR and choose the relevant component interactively, use the analyze tool.

+

Example usage

+

Invoke the tool manually by commenting /improve_component on any PR:

+

improve_component1

+

The tool will generate code suggestions for the selected component (if no component is stated, it will generate code suggestions for the largest component):

+

improve_component2

+

Notes
- Languages currently supported by the tool: Python, Java, C++, JavaScript, TypeScript, C#.
- This tool can also be triggered interactively by using the analyze tool.

+

Configuration options

+
  • num_code_suggestions: number of code suggestions to provide. Default is 4.
  • extra_instructions: Optional extra instructions to the tool. For example: "focus on ...".
  • file: in case there are several components with the same name, you can specify the relevant file.
  • class_name: in case there are several methods with the same name in the same file, you can specify the relevant class name.
Tools

+

Here is a list of Qodo Merge tools, each with a dedicated page that explains how to use it:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Tool: Description
PR Description (/describe): Automatically generating PR description - title, type, summary, code walkthrough and labels
PR Review (/review): Adjustable feedback about the PR, possible issues, security concerns, review effort and more
Code Suggestions (/improve): Code suggestions for improving the PR
Question Answering (/ask ...): Answering free-text questions about the PR, or on specific code lines
Update Changelog (/update_changelog): Automatically updating the CHANGELOG.md file with the PR changes
Find Similar Issue (/similar_issue): Automatically retrieves and presents similar issues
Help (/help): Provides a list of all the available tools. Also enables to trigger them interactively (💎)
💎 Add Documentation (/add_docs): Generates documentation to methods/functions/classes that changed in the PR
💎 Generate Custom Labels (/generate_labels): Generates custom labels for the PR, based on specific guidelines defined by the user
💎 Analyze (/analyze): Identify code components that changed in the PR, and enables to interactively generate tests, docs, and code suggestions for each component
💎 Custom Prompt (/custom_prompt): Automatically generates custom suggestions for improving the PR code, based on specific guidelines defined by the user
💎 Generate Tests (/test component_name): Automatically generates unit tests for a selected component, based on the PR code changes
💎 Improve Component (/improve_component component_name): Generates code suggestions for a specific code component that changed in the PR
💎 CI Feedback (/checks ci_job): Automatically generates feedback and analysis for a failed CI job
+

Note that the tools marked with 💎 are available only for Qodo Merge Pro users.

Review

+ +

Overview

+

The review tool scans the PR code changes, and generates a list of feedback items about the PR, aiming to aid the reviewing process.
The tool can be triggered automatically every time a new PR is opened, or can be invoked manually by commenting on any PR:
/review

+

Note that the main purpose of the review tool is to provide the PR reviewer with useful feedback and insights. The PR author, in contrast, may prefer to save time and focus on the output of the improve tool, which provides actionable code suggestions.

+

(Read more about the different personas in the PR process and how Qodo Merge aims to assist them in our blog)

+

Example usage

+

Manual triggering

+

Invoke the tool manually by commenting /review on any PR:

+

review comment

+

After ~30 seconds, the tool will generate a review for the PR:

+

review

+

If you want to edit configurations, add the relevant ones to the command:
/review --pr_reviewer.some_config1=... --pr_reviewer.some_config2=...

+

Automatic triggering

+

To run the review tool automatically when a PR is opened, define in a configuration file:
[github_app]
pr_commands = [
    "/review",
    ...
]

[pr_reviewer]
num_code_suggestions = ...
...

+
  • The pr_commands lists commands that will be executed automatically when a PR is opened.
  • The [pr_reviewer] section contains the configurations for the review tool you want to edit (if any).

Configuration options

+
+

General options

+
+ + + + + + + + + + + + + + + + + + + + + +
num_code_suggestions: Number of code suggestions provided by the 'review' tool. Default is 0, meaning no code suggestions will be provided by the `review` tool.
inline_code_comments: If set to true, the tool will publish the code suggestions as comments on the code diff. Default is false. Note that you need to set `num_code_suggestions`>0 to get code suggestions.
persistent_comment: If set to true, the review comment will be persistent, meaning that every new review request will edit the previous one. Default is true.
extra_instructions: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".
enable_help_text: If set to true, the tool will display a help text in the comment. Default is true.
+ +
+

Enable/disable specific sub-sections

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
require_score_review: If set to true, the tool will add a section that scores the PR. Default is false.
require_tests_review: If set to true, the tool will add a section that checks if the PR contains tests. Default is true.
require_estimate_effort_to_review: If set to true, the tool will add a section that estimates the effort needed to review the PR. Default is true.
require_can_be_split_review: If set to true, the tool will add a section that checks if the PR contains several themes, and can be split into smaller PRs. Default is false.
require_security_review: If set to true, the tool will add a section that checks if the PR contains a possible security or vulnerability issue. Default is true.
require_ticket_analysis_review: If set to true, and the PR contains a GitHub ticket number, the tool will add a section that checks if the PR in fact fulfilled the ticket requirements. Default is true.
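As an illustration, these sub-sections could be toggled in the configuration file like so (a sketch; the chosen values are illustrative and should be adapted to your needs):
[pr_reviewer]
require_score_review = true
require_tests_review = true
require_can_be_split_review = true
require_security_review = true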
+ +
+

Adding PR labels

+
+

You can enable/disable the review tool adding specific labels to the PR:

+ + + + + + + + + +
enable_review_labels_security: If set to true, the tool will publish a 'possible security issue' label if it detects a security issue. Default is true.
enable_review_labels_effort: If set to true, the tool will publish a 'Review effort [1-5]: x' label. Default is true.
+ +
+

Auto-approval

+
+

If enabled, the review tool can approve a PR when a specific comment, /review auto_approve, is invoked.

+ + + + + + + + + +
enable_auto_approval: If set to true, the tool will approve the PR when invoked with the 'auto_approve' command. Default is false. This flag can be changed only from a configuration file.
maximal_review_effort: Maximal effort level for auto-approval. If the PR's estimated review effort is above this threshold, the auto-approval will not run. Default is 5.
+ +

Usage Tips

+
+

General guidelines

+

The review tool provides a collection of configurable feedback items about a PR.
It is recommended to review the Configuration options section, and choose the relevant options for your use case.

+

Some of the features that are disabled by default are quite useful, and should be considered for enabling. For example: require_score_review, and more.

+

On the other hand, if you find one of the enabled features to be irrelevant for your use case, disable it. No default configuration can fit all use cases.

+
+
+

Automation

+

When you first install the Qodo Merge app, the default mode for the review tool is:
pr_commands = ["/review --pr_reviewer.num_code_suggestions=0", ...]
meaning the review tool will run automatically on every PR, without providing code suggestions.
Edit this field to enable/disable the tool, or to change the configurations used.

+
+
+

Possible labels from the review tool

+

The review tool can auto-generate two specific types of labels for a PR:

+
  • a possible security issue label that detects if a possible security issue exists in the PR code (enable_review_labels_security flag)
  • a Review effort [1-5]: x label, where x is the estimated effort to review the PR (enable_review_labels_effort flag)

Both modes are useful, and we recommend enabling them.

+
+
+

Extra instructions

+

Extra instructions are important.
The review tool can be configured with extra instructions, which can be used to guide the model to feedback tailored to the needs of your project.

+

Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter. Specify the relevant sub-tool, and the relevant aspects of the PR that you want to emphasize.

+

Examples of extra instructions:
[pr_reviewer]
extra_instructions="""\
In the code feedback section, emphasize the following:
- Does the code logic cover relevant edge cases?
- Is the code logic clear and easy to understand?
- Is the code logic efficient?
...
"""
Use triple quotes to write multi-line instructions. Use bullet points to make the instructions more readable.

+
+
+

Auto-approval

+

Qodo Merge can approve a PR when a specific comment is invoked.

+

To ensure safety, the auto-approval feature is disabled by default. To enable auto-approval, you need to actively set the following in a pre-defined configuration file:
[pr_reviewer]
enable_auto_approval = true
(this specific flag cannot be set with a command line argument, only in the configuration file, committed to the repository)

+

After enabling, commenting on a PR with:
/review auto_approve
will make Qodo Merge automatically approve the PR, and add a comment with the approval.

+

You can also enable auto-approval only if the PR meets certain requirements, such as that the estimated_review_effort label is equal to or below a certain threshold, by adjusting the flag:
[pr_reviewer]
maximal_review_effort = 5
+

💎 Similar Code

+ +

Overview

+

The similar code tool retrieves the most similar code components from inside the organization's codebase, or from open-source code.

+

For example:

+

Global Search for a method called chat_completion:

+

similar code global

+

Qodo Merge will examine the code component and will extract the most relevant keywords to search for similar code:

+
  • extracted keywords: the keywords that were extracted from the code by Qodo Merge. The link will open a search page with the extracted keywords, to allow the user to modify the search if needed.
  • search context: the context in which the search will be performed, the organization's codebase or open-source code (Global).
  • similar code: the most similar code components found. The link will open the code component in the relevant file.
  • relevant repositories: the open-source repositories that are relevant to the searched code component and its keywords.

Search result link example:

+

code search result single

+

Organization Search:

+

similar code org

+

How to use

+

Manually

+

To invoke the similar code tool manually, comment on the PR:
/find_similar_component COMPONENT_NAME
where COMPONENT_NAME should be the name of a code component in the PR (class, method, function).

+

If there is a name ambiguity, there are two configurations that will help the tool to find the correct component:

+
  • --pr_find_similar_component.file: in case there are several components with the same name, you can specify the relevant file.
  • --pr_find_similar_component.class_name: in case there are several methods with the same name in the same file, you can specify the relevant class name.
+

Example:
/find_similar_component COMPONENT_NAME --pr_find_similar_component.file=FILE_NAME

+

Automatically (via Analyze table)

+

It can be invoked automatically from the analyze table, which can be accessed by commenting:
/analyze
Then choose the components you want to find similar code for, and click on the similar checkbox.
analyze similar

+

If you are looking to search for similar code in the organization's codebase, you can click on the Organization checkbox, and it will invoke a new search command just for the organization's codebase.

+

similar code global

+

Configuration options

+
  • search_from_org: if set to true, the tool will search for similar code in the organization's codebase. Default is false.
  • number_of_keywords: number of keywords to use for the search. Default is 5.
  • number_of_results: the maximum number of results to present. Default is 5.
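A minimal sketch of these options in a configuration file, assuming they live under the pr_find_similar_component section implied by the command-line flags above (the values shown are illustrative):
[pr_find_similar_component]
search_from_org = true
number_of_keywords = 5
number_of_results = 3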
Similar Issues

+ +

Overview

+

The similar issue tool retrieves the most similar issues to the current issue.
It can be invoked manually by commenting on any PR:
/similar_issue

+

Example usage

+

similar_issue_original_issue

+

similar_issue_comment

+

similar_issue

+

Note that to perform retrieval, the similar_issue tool indexes all of the repo's previous issues (once).

+

Select a VectorDB by changing the pr_similar_issue parameter in the configuration.toml file.

+

Two VectorDBs are available to switch between:
1. LanceDB
2. Pinecone

+

To enable usage of the 'similar issue' tool for Pinecone, you need to set the following keys in .secrets.toml (or in the relevant environment variables):

+

[pinecone]
api_key = "..."
environment = "..."
These parameters can be obtained by registering with Pinecone.

+

How to use

+
  • To invoke the 'similar issue' tool from CLI, run:
    python3 cli.py --issue_url=... similar_issue
  • To invoke the 'similar issue' tool via online usage, comment on a PR:
    /similar_issue
  • You can also enable the 'similar issue' tool to run automatically when a new issue is opened, by adding it to the pr_commands list in the github_app section, as sketched below.
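A sketch of such a configuration might look like this (the exact command string is an assumption; adjust it to your setup):
[github_app]
pr_commands = [
    "/similar_issue",
    ...
]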
💎 Test

+ +

Overview

+

By combining LLM abilities with static code analysis, the test tool generates tests for a selected component, based on the PR code changes.
It can be invoked manually by commenting on any PR:
/test component_name
where 'component_name' is the name of a specific component in the PR.
To get a list of the components that changed in the PR and choose the relevant component interactively, use the analyze tool.

+

Example usage

+

Invoke the tool manually by commenting /test on any PR. The tool will generate tests for the selected component (if no component is stated, it will generate tests for the largest component):

+

test1

+

(Example taken from here):

+

Notes
- Languages currently supported by the tool: Python, Java, C++, JavaScript, TypeScript, C#.
- This tool can also be triggered interactively by using the analyze tool.

+

Configuration options

+
    +
  • num_tests: number of tests to generate. Default is 3.
  • +
  • testing_framework: the testing framework to use. If not set, for Python it will use pytest, for Java it will use JUnit, for C++ it will use Catch2, and for JavaScript and TypeScript it will use jest.
  • +
  • avoid_mocks: if set to true, the tool will try to avoid using mocks in the generated tests. Note that even if this option is set to true, the tool might still use mocks if it cannot generate a test without them. Default is true.
  • +
  • extra_instructions: Optional extra instructions to the tool. For example: "use the following mock injection scheme: ...".
  • +
  • file: in case there are several components with the same name, you can specify the relevant file.
  • +
  • class_name: in case there are several methods with the same name in the same file, you can specify the relevant class name.
  • +
  • enable_help_text: if set to true, the tool will add a help text to the PR comment. Default is true.
  • +
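For reference, a hedged sketch of how the options above might be set in a configuration file; the section name pr_tests is an assumption inferred from the tool's name and should be verified against configuration.toml before use:
[pr_tests]   # section name assumed - verify in configuration.toml
num_tests = 3
testing_framework = "pytest"
avoid_mocks = true
enable_help_text = true
extra_instructions = ""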
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tools/update_changelog/index.html b/tools/update_changelog/index.html new file mode 100644 index 000000000..64d8795c3 --- /dev/null +++ b/tools/update_changelog/index.html @@ -0,0 +1,2176 @@ + + + + + + + + + + + + + + + + + + + + + + + Update Changelog - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Update Changelog

+ +

Overview

+

The update_changelog tool automatically updates the CHANGELOG.md file with the PR changes. +It can be invoked manually by commenting on any PR: +

/update_changelog
+

+

Example usage

+

update_changelog_comment

+

update_changelog

+

Configuration options

+

Under the section pr_update_changelog, the configuration file contains options to customize the 'update changelog' tool:

+
    +
  • push_changelog_changes: whether to push the changes to CHANGELOG.md, or just print them. Default is false (print only).
  • +
  • extra_instructions: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...
  • +
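For example, the options above can be set in the configuration file as follows (values shown are only illustrative):
[pr_update_changelog]
push_changelog_changes = false
extra_instructions = ""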
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/usage-guide/EXAMPLE_BEST_PRACTICE/index.html b/usage-guide/EXAMPLE_BEST_PRACTICE/index.html new file mode 100644 index 000000000..7fdcb5c38 --- /dev/null +++ b/usage-guide/EXAMPLE_BEST_PRACTICE/index.html @@ -0,0 +1,2276 @@ + + + + + + + + + + + + + + + + + + + EXAMPLE BEST PRACTICE - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

EXAMPLE BEST PRACTICE

+ +

Recommend Python Best Practices

+

This document outlines a series of recommended best practices for Python development. These guidelines aim to improve code quality, maintainability, and readability.

+

Imports

+

Use import statements for packages and modules only, not for individual types, classes, or functions.

+

Definition

+

Reusability mechanism for sharing code from one module to another.

+

Decision

+
    +
  • Use import x for importing packages and modules.
  • +
  • Use from x import y where x is the package prefix and y is the module name with no prefix.
  • +
  • Use from x import y as z in any of the following circumstances:
      +
    • Two modules named y are to be imported.
    • +
    • y conflicts with a top-level name defined in the current module.
    • +
    • y conflicts with a common parameter name that is part of the public API (e.g., features).
    • +
    • y is an inconveniently long name, or too generic in the context of your code
    • +
    +
  • +
  • Use import y as z only when z is a standard abbreviation (e.g., import numpy as np).
  • +
+

For example, the module sound.effects.echo may be imported as follows:

+
from sound.effects import echo
+...
+echo.EchoFilter(input, output, delay=0.7, atten=4)
+
+

Do not use relative names in imports. Even if the module is in the same package, use the full package name. This helps prevent unintentionally importing a package twice.
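A short illustration in the Yes/No style used elsewhere in this document (the package and module names are hypothetical):
Yes:  from mypkg.utils import helpers   # full package path, even for a sibling module

No:   from . import helpers             # relative import of a sibling module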

+
Exemptions
+

Exemptions from this rule:

+ +

Packages

+

Import each module using the full pathname location of the module.

+

Decision

+

All new code should import each module by its full package name.

+

Imports should be as follows:

+
Yes:
+  # Reference absl.flags in code with the complete name (verbose).
+  import absl.flags
+  from doctor.who import jodie
+
+  _FOO = absl.flags.DEFINE_string(...)
+
+
Yes:
+  # Reference flags in code with just the module name (common).
+  from absl import flags
+  from doctor.who import jodie
+
+  _FOO = flags.DEFINE_string(...)
+
+

(assume this file lives in doctor/who/ where jodie.py also exists)

+
No:
+  # Unclear what module the author wanted and what will be imported.  The actual
+  # import behavior depends on external factors controlling sys.path.
+  # Which possible jodie module did the author intend to import?
+  import jodie
+
+

The directory the main binary is located in should not be assumed to be in sys.path despite that happening in some environments. This being the case, code should assume that import jodie refers to a third-party or top-level package named jodie, not a local jodie.py.

+

Default Iterators and Operators

+

Use default iterators and operators for types that support them, like lists, dictionaries, and files.

+

Definition

+

Container types, like dictionaries and lists, define default iterators and membership test operators (“in” and “not in”).

+

Decision

+

Use default iterators and operators for types that support them, like lists, dictionaries, and files. The built-in types define iterator methods, too. Prefer these methods to methods that return lists, except that you should not mutate a container while iterating over it.

+
Yes:  for key in adict: ...
+      if obj in alist: ...
+      for line in afile: ...
+      for k, v in adict.items(): ...
+
+
No:   for key in adict.keys(): ...
+      for line in afile.readlines(): ...
+
+

Lambda Functions

+

Okay for one-liners. Prefer generator expressions over map() or filter() with a lambda.

+

Decision

+

Lambdas are allowed. If the code inside the lambda function spans multiple lines or is longer than 60-80 chars, it might be better to define it as a regular nested function.

+

For common operations like multiplication, use the functions from the operator module instead of lambda functions. For example, prefer operator.mul to lambda x, y: x * y.
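A small illustration in the same Yes/No style (functools is assumed to be imported, and numbers is a hypothetical iterable):
Yes:  import operator
      product = functools.reduce(operator.mul, numbers, 1)

No:   product = functools.reduce(lambda x, y: x * y, numbers, 1)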

+

Default Argument Values

+

Okay in most cases.

+

Definition

+

You can specify values for variables at the end of a function’s parameter list, e.g., def foo(a, b=0):. If foo is called with only one argument, b is set to 0. If it is called with two arguments, b has the value of the second argument.

+

Decision

+

Okay to use with the following caveat:

+

Do not use mutable objects as default values in the function or method definition.

+
Yes: def foo(a, b=None):
+         if b is None:
+             b = []
+Yes: def foo(a, b: Sequence | None = None):
+         if b is None:
+             b = []
+Yes: def foo(a, b: Sequence = ()):  # Empty tuple OK since tuples are immutable.
+         ...
+
+
from absl import flags
+_FOO = flags.DEFINE_string(...)
+
+No:  def foo(a, b=[]):
+         ...
+No:  def foo(a, b=time.time()):  # Is `b` supposed to represent when this module was loaded?
+         ...
+No:  def foo(a, b=_FOO.value):  # sys.argv has not yet been parsed...
+         ...
+No:  def foo(a, b: Mapping = {}):  # Could still get passed to unchecked code.
+         ...
+
+

True/False Evaluations

+

Use the “implicit” false if possible, e.g., if foo: rather than if foo != []:
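For illustration, in the same Yes/No style (users is a hypothetical list):
Yes:  if not users:
          print('no users')
      if foo:
          ...

No:   if len(users) == 0:
          print('no users')
      if foo != []:
          ...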

+

Lexical Scoping

+

Okay to use.

+

An example of the use of this feature is:

+
def get_adder(summand1: float) -> Callable[[float], float]:
+    """Returns a function that adds numbers to a given number."""
+    def adder(summand2: float) -> float:
+        return summand1 + summand2
+
+    return adder
+
+

Decision

+

Okay to use.

+

Threading

+

Do not rely on the atomicity of built-in types.

+

While Python’s built-in data types such as dictionaries appear to have atomic operations, there are corner cases where they aren’t atomic (e.g. if __hash__ or __eq__ are implemented as Python methods) and their atomicity should not be relied upon. Neither should you rely on atomic variable assignment (since this in turn depends on dictionaries).

+

Use the queue module’s Queue data type as the preferred way to communicate data between threads. Otherwise, use the threading module and its locking primitives. Prefer condition variables and threading.Condition instead of using lower-level locks.
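A minimal sketch of that pattern (the worker function, the items, and the None sentinel are all illustrative):
import queue
import threading

def worker(q: queue.Queue) -> None:
    while True:
        item = q.get()
        if item is None:  # sentinel value tells the worker to stop
            break
        print('processing', item)

q = queue.Queue()
t = threading.Thread(target=worker, args=(q,))
t.start()
for item in range(3):
    q.put(item)
q.put(None)
t.join()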

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/usage-guide/PR_agent_pro_models/index.html b/usage-guide/PR_agent_pro_models/index.html new file mode 100644 index 000000000..2b12d7182 --- /dev/null +++ b/usage-guide/PR_agent_pro_models/index.html @@ -0,0 +1,2081 @@ + + + + + + + + + + + + + + + + + + + PR agent pro models - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

PR agent pro models

+ +

Qodo Merge Pro Models

+

The default models used by Qodo Merge Pro are a combination of Claude-3.5-sonnet and OpenAI's GPT-4 models.

+

Users can configure Qodo Merge Pro to use only a specific model by editing the configuration file.

+

For example, to restrict Qodo Merge Pro to using only Claude-3.5-sonnet, add this setting:

+
[config]
+model="claude-3-5-sonnet"
+
+

Or to restrict Qodo Merge Pro to using only GPT-4o, add this setting: +

[config]
+model="gpt-4o"
+

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/usage-guide/additional_configurations/index.html b/usage-guide/additional_configurations/index.html new file mode 100644 index 000000000..4aed5c4c0 --- /dev/null +++ b/usage-guide/additional_configurations/index.html @@ -0,0 +1,2386 @@ + + + + + + + + + + + + + + + + + + + + + + + Additional Configurations - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Additional Configurations

+ +

Show possible configurations

+

The possible configurations of Qodo Merge are stored here. +In the tools page you can find explanations on how to use these configurations for each tool.

+

To print all the available configurations as a comment on your PR, you can use the following command: +

/config
+

+

possible_config1

+

To view the actual configurations used for a specific tool, after all the user settings are applied, you can add for each tool a --config.output_relevant_configurations=true suffix. +For example: +

/improve --config.output_relevant_configurations=true
+
+Will output an additional field showing the actual configurations used for the improve tool.

+

possible_config2

+

Ignoring files from analysis

+

In some cases, you may want to exclude specific files or directories from the analysis performed by Qodo Merge. This can be useful, for example, when you have files that are generated automatically or files that shouldn't be reviewed, like vendor code.

+

You can ignore files or folders using the following methods: + - IGNORE.GLOB + - IGNORE.REGEX

+

which you can edit to ignore files or folders based on glob or regex patterns.

+

Example usage

+

Let's look at an example where we want to ignore all files with .py extension from the analysis.

+

To ignore Python files in a PR with online usage, comment on a PR: +/review --ignore.glob="['*.py']"

+

To ignore Python files in all PRs using glob pattern, set in a configuration file: +

[ignore]
+glob = ['*.py']
+

+

And to ignore Python files in all PRs using regex pattern, set in a configuration file: +

[ignore]
+regex = ['.*\.py$']
+

+

Extra instructions

+

All Qodo Merge tools have a parameter called extra_instructions, that enables you to add free-text extra instructions. Example usage: +

/update_changelog --pr_update_changelog.extra_instructions="Make sure to update also the version ..."
+

+

Working with large PRs

+

The default mode of Qodo Merge is to have a single call per tool, using GPT-4, which has a token limit of 8000 tokens. +This mode provides a very good speed-quality-cost tradeoff, and can handle most PRs successfully. +When the PR is above the token limit, it employs a PR Compression strategy.

+

However, for very large PRs, or in case you want to emphasize quality over speed and cost, there are two possible solutions: +1) Use a model with a larger context, like GPT-4-32K or Claude-100K. This solution is applicable to all the tools. +2) For the /improve tool, there is an 'extended' mode (/improve --extended), +which divides the PR into chunks, and processes each chunk separately. With this mode, regardless of the model, no compression will be done (but for large PRs, multiple model calls may occur).

+

Patch Extra Lines

+

By default, around any change in your PR, git patch provides three lines of context above and below the change. +

@@ -12,5 +12,5 @@ def func1():
+ code line that already existed in the file...
+ code line that already existed in the file...
+ code line that already existed in the file....
+-code line that was removed in the PR
++new code line added in the PR
+ code line that already existed in the file...
+ code line that already existed in the file...
+ code line that already existed in the file...
+

+

Qodo Merge will try to increase the number of lines of context via the following parameters: +

[config]
+patch_extra_lines_before=3
+patch_extra_lines_after=1
+

+

Increasing this number provides more context to the model, but will also increase the token budget, and may overwhelm the model with too much information, unrelated to the actual PR code changes.

+

If the PR is too large (see PR Compression strategy), Qodo Merge may automatically set this number to 0, and will use the original git patch.

+

Editing the prompts

+

The prompts for the various Qodo Merge tools are defined in the pr_agent/settings folder. +In practice, the prompts are loaded and stored as a standard setting object. +Hence, editing them is similar to editing any other configuration value - just place the relevant key in your .pr_agent.toml file, and override the default value.

+

For example, if you want to edit the prompts of the describe tool, you can add the following to your .pr_agent.toml file: +

[pr_description_prompt]
+system="""
+...
+"""
+user="""
+...
+"""
+
+Note that the new prompt will need to generate an output compatible with the relevant post-process function.

+

Integrating with Logging Observability Platforms

+

Various logging observability tools can be used out-of-the-box when using the default LiteLLM AI Handler. Simply configure the LiteLLM callback settings in configuration.toml and set environment variables according to the LiteLLM documentation.

+

For example, to use LangSmith you can add the following to your configuration.toml file: +

[litellm]
+enable_callbacks = true
+success_callback = ["langsmith"]
+failure_callback = ["langsmith"]
+service_callback = []
+

+

Then set the following environment variables:

+
LANGSMITH_API_KEY=<api_key>
+LANGSMITH_PROJECT=<project>
+LANGSMITH_BASE_URL=<url>
+
+

Ignoring automatic commands in PRs

+

In some cases, you may want to automatically ignore specific PRs. Qodo Merge enables you to ignore PRs with a specific title, or from/to specific branches (regex matching).

+

To ignore PRs with a specific title such as "[Bump]: ...", you can add the following to your configuration.toml file:

+
[config]
+ignore_pr_title = ["\\[Bump\\]"]
+
+

Where ignore_pr_title is a list of regex patterns to match the PR titles you want to ignore. The default is ignore_pr_title = ["^\\[Auto\\]", "^Auto"].

+

To ignore PRs from specific source or target branches, you can add the following to your configuration.toml file:

+
[config]
+ignore_pr_source_branches = ['develop', 'main', 'master', 'stage']
+ignore_pr_target_branches = ["qa"]
+
+

Where ignore_pr_source_branches and ignore_pr_target_branches are lists of regex patterns to match the source and target branches you want to ignore. +They are not mutually exclusive; you can use them together or separately.

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/usage-guide/automations_and_usage/index.html b/usage-guide/automations_and_usage/index.html new file mode 100644 index 000000000..ec85d086c --- /dev/null +++ b/usage-guide/automations_and_usage/index.html @@ -0,0 +1,2528 @@ + + + + + + + + + + + + + + + + + + + + + + + Usage and Automation - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Usage and Automation

+ +

Local repo (CLI)

+

When running from your locally cloned Qodo Merge repo (CLI), your local configuration file will be used. +Examples of invoking the different tools via the CLI:

+
    +
  • Review: python -m pr_agent.cli --pr_url=<pr_url> review
  • +
  • Describe: python -m pr_agent.cli --pr_url=<pr_url> describe
  • +
  • Improve: python -m pr_agent.cli --pr_url=<pr_url> improve
  • +
  • Ask: python -m pr_agent.cli --pr_url=<pr_url> ask "Write me a poem about this PR"
  • +
  • Reflect: python -m pr_agent.cli --pr_url=<pr_url> reflect
  • +
  • Update Changelog: python -m pr_agent.cli --pr_url=<pr_url> update_changelog
  • +
+

<pr_url> is the url of the relevant PR (for example: #50).

+

Notes:

+

(1) in addition to editing your local configuration file, you can also change any configuration value by adding it to the command line: +

python -m pr_agent.cli --pr_url=<pr_url>  /review --pr_reviewer.extra_instructions="focus on the file: ..."
+

+

(2) You can print results locally, without publishing them, by setting in configuration.toml: +

[config]
+publish_output=false
+verbosity_level=2
+
+This is useful for debugging or experimenting with different tools.

+

(3)

+

git provider: The git_provider field in a configuration file determines the GIT provider that will be used by Qodo Merge. Currently, the following providers are supported: +"github", "gitlab", "bitbucket", "azure", "codecommit", "local", "gerrit"

+

Default is "github".
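For example, to set it explicitly in the configuration file:
[config]
git_provider = "github"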

+

Online usage

+

Online usage means invoking Qodo Merge tools by commenting on a PR. +Commands for invoking the different tools via comments:

+
    +
  • Review: /review
  • +
  • Describe: /describe
  • +
  • Improve: /improve (or /improve_code for bitbucket, since /improve is sometimes reserved)
  • +
  • Ask: /ask "..."
  • +
  • Reflect: /reflect
  • +
  • Update Changelog: /update_changelog
  • +
+

To edit a specific configuration value, just add --config_path=<value> to any command. +For example, if you want to edit the review tool configurations, you can run: +

/review --pr_reviewer.extra_instructions="..." --pr_reviewer.require_score_review=false
+
+Any configuration value in the configuration file can be similarly edited. Comment /config to see the list of available configurations.

+

GitHub App

+
+

Configurations for Qodo Merge Pro

+

Qodo Merge Pro for GitHub is an App hosted by CodiumAI, so all the instructions below are also relevant for Qodo Merge Pro users. +The same goes for the GitLab webhook and BitBucket App sections.

+
+

GitHub app automatic tools when a new PR is opened

+

The github_app section defines GitHub app specific configurations.

+

The configuration parameter pr_commands defines the list of tools that will be run automatically when a new PR is opened. +

[github_app]
+pr_commands = [
+    "/describe --pr_description.final_update_message=false",
+    "/review --pr_reviewer.num_code_suggestions=0",
+    "/improve",
+]
+
+This means that when a new PR is opened/reopened or marked as ready for review, Qodo Merge will run the describe, review and improve tools.
+For the review tool, for example, the num_code_suggestions parameter will be set to 0.

+

You can override the default tool parameters by using one of the three options for a configuration file: wiki, local, or global. +For example, if your local .pr_agent.toml file contains: +

[pr_description]
+generate_ai_title = true
+
+Every time you run the describe tool, including automatic runs, the PR title will be generated by the AI.

+

To cancel the automatic run of all the tools, set: +

[github_app]
+pr_commands = []
+

+

GitHub app automatic tools for push actions (commits to an open PR)

+

In addition to running automatic tools when a PR is opened, the GitHub app can also respond to new code that is pushed to an open PR.

+

The configuration toggle handle_push_trigger can be used to enable this feature.
+The configuration parameter push_commands defines the list of tools that will be run automatically when new code is pushed to the PR. +

[github_app]
+handle_push_trigger = true
+push_commands = [
+    "/describe",
+    "/review  --pr_reviewer.num_code_suggestions=0 --pr_reviewer.final_update_message=false",
+]
+
+This means that when new code is pushed to the PR, Qodo Merge will run the describe and review tools, with the specified parameters.

+

GitHub Action

+

GitHub Action is a different way to trigger Qodo Merge tools, and uses a different configuration mechanism than the GitHub App.
+You can configure settings for GitHub Action by adding environment variables under the env section in the .github/workflows/pr_agent.yml file. +Specifically, start by setting the following environment variables: +

      env:
+        OPENAI_KEY: ${{ secrets.OPENAI_KEY }} # Make sure to add your OpenAI key to your repo secrets
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Make sure to add your GitHub token to your repo secrets
+        github_action_config.auto_review: "true" # enable\disable auto review
+        github_action_config.auto_describe: "true" # enable\disable auto describe
+        github_action_config.auto_improve: "true" # enable\disable auto improve
+        github_action_config.pr_actions: '["opened", "reopened", "ready_for_review", "review_requested"]' # the list must be passed as a quoted string
+
+github_action_config.auto_review, github_action_config.auto_describe and github_action_config.auto_improve are used to enable/disable automatic tools that run when a new PR is opened. +If not set, the default configuration is for all three tools to run automatically when a new PR is opened.

+

github_action_config.pr_actions is used to configure which pull_request events will trigger the enabled auto flags. +If not set, the default configuration is ["opened", "reopened", "ready_for_review", "review_requested"].

+

github_action_config.enable_output is used to enable/disable the GitHub Actions output parameter (default is true). +The review result is output as JSON to the steps.{step-id}.outputs.review property. +The JSON structure is equivalent to the YAML data structure defined in pr_reviewer_prompts.toml.

+

Note that you can give additional config parameters by adding environment variables to .github/workflows/pr_agent.yml, or by using a .pr_agent.toml configuration file in the root of your repo

+

For example, you can set an environment variable: pr_description.publish_labels=false, or add a .pr_agent.toml file with the following content: +

[pr_description]
+publish_labels = false
+
+to prevent Qodo Merge from publishing labels when running the describe tool.

+

GitLab Webhook

+

After setting up a GitLab webhook, to control which commands will run automatically when a new MR is opened, you can set the pr_commands parameter in the configuration file, similar to the GitHub App: +

[gitlab]
+pr_commands = [
+    "/describe",
+    "/review --pr_reviewer.num_code_suggestions=0",
+    "/improve",
+]
+

+

The GitLab webhook can also respond to new code that is pushed to an open MR. +The configuration toggle handle_push_trigger can be used to enable this feature.
+The configuration parameter push_commands defines the list of tools that will be run automatically when new code is pushed to the MR. +

[gitlab]
+handle_push_trigger = true
+push_commands = [
+    "/describe",
+    "/review  --pr_reviewer.num_code_suggestions=0 --pr_reviewer.final_update_message=false",
+]
+

+

Note that to use the 'handle_push_trigger' feature, you also need to give the GitLab webhook the "Push events" scope.

+

BitBucket App

+

Similar to the GitHub app, when running Qodo Merge from the BitBucket App, the default configuration file from a pre-built Docker image will be initially loaded.

+

By uploading a local .pr_agent.toml file to the root of the repo's main branch, you can edit and customize any configuration parameter. Note that you need to upload .pr_agent.toml prior to creating a PR, in order for the configuration to take effect.

+

For example, if your local .pr_agent.toml file contains: +

[pr_reviewer]
+extra_instructions = "Answer in japanese"
+

+

Each time you invoke a /review tool, it will use the extra instructions you set in the local configuration file.

+

Note that among other limitations, BitBucket provides relatively low rate limits for applications (up to 1000 requests per hour), and does not provide an API to track the actual rate-limit usage. +If you experience a lack of responses from Qodo Merge, you might want to set bitbucket_app.avoid_full_files=true in your configuration file. +This will prevent Qodo Merge from acquiring the full file content, and will only use the diff content. This will reduce the number of requests made to BitBucket, at the cost of a small decrease in accuracy, as dynamic context will not be applicable.
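In a configuration file, this corresponds to:
[bitbucket_app]
avoid_full_files = true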

+

BitBucket Self-Hosted App automatic tools

+

To control which commands will run automatically when a new PR is opened, you can set the pr_commands parameter in the configuration file. +Specifically, set the following values:

+

[bitbucket_app]
+pr_commands = [
+    "/review --pr_reviewer.num_code_suggestions=0",
+    "/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
+]
+
+Note that specifically for Bitbucket, we recommend using --pr_code_suggestions.suggestions_score_threshold=7, and that is the default value we set for Bitbucket. +Since this platform only supports inline code suggestions, we want to limit the number of suggestions presented.

+

Azure DevOps provider

+

To use Azure DevOps provider use the following settings in configuration.toml: +

[config]
+git_provider="azure"
+

+

The Azure DevOps provider supports PAT token or DefaultAzureCredential authentication. +A PAT is faster to create, but has a built-in expiration date, and will use the user identity for API calls. +With DefaultAzureCredential you can use a managed identity or a Service Principal, which are more secure and will create a separate ADO user identity (via AAD) for the agent.

+

If PAT was chosen, you can assign the value in .secrets.toml. +If DefaultAzureCredential was chosen, you can assign the additional env vars like AZURE_CLIENT_SECRET directly, +or use managed identity/az cli (for local development) without any additional configuration. +In any case, the 'org' value must be assigned in .secrets.toml: +

[azure_devops]
+org = "https://dev.azure.com/YOUR_ORGANIZATION/"
+# pat = "YOUR_PAT_TOKEN" needed only if using PAT for authentication
+

+

Azure DevOps Webhook

+

To control which commands will run automatically when a new PR is opened, you can set the pr_commands parameter in the configuration file, similar to the GitHub App: +

[azure_devops_server]
+pr_commands = [
+    "/describe",
+    "/review --pr_reviewer.num_code_suggestions=0",
+    "/improve",
+]
+

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/usage-guide/changing_a_model/index.html b/usage-guide/changing_a_model/index.html new file mode 100644 index 000000000..9a42de2b7 --- /dev/null +++ b/usage-guide/changing_a_model/index.html @@ -0,0 +1,2442 @@ + + + + + + + + + + + + + + + + + + + + + + + Changing a Model - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Changing a Model

+ +

Changing a model

+

See here for a list of available models. +To use a different model than the default (GPT-4), you need to edit the following fields in the configuration file: +

[config]
+model = "..."
+model_turbo = "..."
+fallback_models = ["..."]
+

+

For models and environments not from OpenAI, you might need to provide additional keys and other parameters. +You can give parameters via a configuration file (see below for instructions), or from environment variables. See litellm documentation for the environment variables relevant per model.

+

Azure

+

To use Azure, set in your .secrets.toml (working from CLI), or in the GitHub Settings > Secrets and variables (working from GitHub App or GitHub Action): +

[openai]
+key = "" # your azure api key
+api_type = "azure"
+api_version = '2023-05-15'  # Check Azure documentation for the current API version
+api_base = ""  # The base URL for your Azure OpenAI resource. e.g. "https://<your resource name>.openai.azure.com"
+deployment_id = ""  # The deployment name you chose when you deployed the engine
+

+

and set in your configuration file: +

[config]
+model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
+model_turbo="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
+fallback_models=["..."] # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
+

+

Hugging Face

+

Local +You can run Hugging Face models locally through either vLLM or Ollama.

+

E.g. to use a new Hugging Face model locally via Ollama, set: +

[__init__.py]
+MAX_TOKENS = {
+    "model-name-on-ollama": <max_tokens>
+}
+e.g.
+MAX_TOKENS={
+    ...,
+    "ollama/llama2": 4096
+}
+
+
+[config] # in configuration.toml
+model = "ollama/llama2"
+model_turbo = "ollama/llama2"
+fallback_models=["ollama/llama2"]
+
+[ollama] # in .secrets.toml
+api_base = ... # the base url for your Hugging Face inference endpoint
+# e.g. if running Ollama locally, you may use:
+api_base = "http://localhost:11434/"
+

+

Inference Endpoints

+

To use a new model with Hugging Face Inference Endpoints, for example, set: +

[__init__.py]
+MAX_TOKENS = {
+    "model-name-on-huggingface": <max_tokens>
+}
+e.g.
+MAX_TOKENS={
+    ...,
+    "meta-llama/Llama-2-7b-chat-hf": 4096
+}
+[config] # in configuration.toml
+model = "huggingface/meta-llama/Llama-2-7b-chat-hf"
+model_turbo = "huggingface/meta-llama/Llama-2-7b-chat-hf"
+fallback_models=["huggingface/meta-llama/Llama-2-7b-chat-hf"]
+
+[huggingface] # in .secrets.toml
+key = ... # your Hugging Face api key
+api_base = ... # the base url for your Hugging Face inference endpoint
+
+(you can obtain a Llama2 key from here)

+

Replicate

+

To use Llama2 model with Replicate, for example, set: +

[config] # in configuration.toml
+model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
+model_turbo = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
+fallback_models=["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"]
+[replicate] # in .secrets.toml
+key = ...
+
+(you can obtain a Llama2 key from here)

+

Also, review the AiHandler file for instructions on how to set keys for other models.

+

Groq

+

To use Llama3 model with Groq, for example, set: +

[config] # in configuration.toml
+model = "llama3-70b-8192"
+model_turbo = "llama3-70b-8192"
+fallback_models = ["groq/llama3-70b-8192"] 
+[groq] # in .secrets.toml
+key = ... # your Groq api key
+
+(you can obtain a Groq key from here)

+

Vertex AI

+

To use Google's Vertex AI platform and its associated models (chat-bison/codechat-bison) set:

+
[config] # in configuration.toml
+model = "vertex_ai/codechat-bison"
+model_turbo = "vertex_ai/codechat-bison"
+fallback_models="vertex_ai/codechat-bison"
+
+[vertexai] # in .secrets.toml
+vertex_project = "my-google-cloud-project"
+vertex_location = ""
+
+

Your application default credentials will be used for authentication, so there is no need to set explicit credentials in most environments.

+

If you do want to set explicit credentials, then you can use the GOOGLE_APPLICATION_CREDENTIALS environment variable set to a path to a json credentials file.

+

Anthropic

+

To use Anthropic models, set the relevant models in the configuration section of the configuration file: +

[config]
+model="anthropic/claude-3-opus-20240229"
+model_turbo="anthropic/claude-3-opus-20240229"
+fallback_models=["anthropic/claude-3-opus-20240229"]
+

+

And also set the api key in the .secrets.toml file: +

[anthropic]
+KEY = "..."
+

+

Amazon Bedrock

+

To use Amazon Bedrock and its foundational models, add the below configuration:

+
[config] # in configuration.toml
+model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
+model_turbo="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
+fallback_models=["bedrock/anthropic.claude-v2:1"]
+
+

Note that you have to add access to foundational models before using them. Please refer to this document for more details.

+

If you are using the claude-3 model, please configure the following settings as there are parameters incompatible with claude-3. +

[litellm]
+drop_params = true
+

+

AWS session is automatically authenticated from your environment, but you can also explicitly set AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION_NAME environment variables. Please refer to this document for more details.

+

Custom models

+

If the relevant model doesn't appear here, you can still use it as a custom model:

+

(1) Set the model name in the configuration file: +

[config]
+model="custom_model_name"
+model_turbo="custom_model_name"
+fallback_models=["custom_model_name"]
+
+(2) Set the maximal tokens for the model: +
[config]
+custom_model_max_tokens= ...
+
+(3) Go to litellm documentation, find the model you want to use, and set the relevant environment variables.

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/usage-guide/configuration_options/index.html b/usage-guide/configuration_options/index.html new file mode 100644 index 000000000..aff366946 --- /dev/null +++ b/usage-guide/configuration_options/index.html @@ -0,0 +1,2216 @@ + + + + + + + + + + + + + + + + + + + + + + + Configuration File - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Configuration File

+ +

The different tools and sub-tools used by Qodo Merge are adjustable via the configuration file.

+

In addition to general configuration options, each tool has its own configurations. For example, the review tool will use parameters from the pr_reviewer section in the configuration file. +See the Tools Guide for a detailed description of the different tools and their configurations.

+

There are three ways to set persistent configurations:

+
    +
  1. Wiki configuration page 💎
  2. +
  3. Local configuration file
  4. +
  5. Global configuration file 💎
  6. +
+

In terms of precedence, wiki configurations will override local configurations, and local configurations will override global configurations.

+
+

Tip1: edit only what you need

+

Keep your configuration file minimal, and edit only the relevant values. Don't copy the entire set of configuration options, since that can lead to legacy problems when something changes.

+
+
+

Tip2: show relevant configurations

+

If you set config.output_relevant_configurations=true, each tool will also output in a collapsible section its relevant configurations. This can be useful for debugging, or getting to know the configurations better.
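For example, in a configuration file:
[config]
output_relevant_configurations = true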

+
+

Wiki configuration file 💎

+

Platforms supported: GitHub, GitLab, Bitbucket

+

With Qodo Merge Pro, you can set configurations by creating a page called .pr_agent.toml in the wiki of the repo. +The advantage of this method is that it allows you to set configurations without needing to commit new content to the repo - just edit the wiki page and save.

+

wiki_configuration

+

Click here to see a short instructional video. We recommend surrounding the configuration content with triple-quotes (or ```toml), to allow better presentation when displayed in the wiki as markdown. +An example of such content:

+
[pr_description]
+generate_ai_title=true
+
+

Qodo Merge will know to remove the surrounding quotes when reading the configuration content.

+

Local configuration file

+

Platforms supported: GitHub, GitLab, Bitbucket, Azure DevOps

+

By uploading a local .pr_agent.toml file to the root of the repo's main branch, you can edit and customize any configuration parameter. Note that you need to upload .pr_agent.toml prior to creating a PR, in order for the configuration to take effect.

+

For example, if you set in .pr_agent.toml:

+
[pr_reviewer]
+extra_instructions="""\
+- instruction a
+- instruction b
+...
+"""
+
+

Then you can give a list of extra instructions to the review tool.

+

Global configuration file 💎

+

Platforms supported: GitHub, GitLab, Bitbucket

+

If you create a repo called pr-agent-settings in your organization, its configuration file .pr_agent.toml will be used as a global configuration file for any other repo that belongs to the same organization. +Parameters from a local .pr_agent.toml file, in a specific repo, will override the global configuration parameters.

+

For example, in the GitHub organization Codium-ai:

+ + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/usage-guide/index.html b/usage-guide/index.html new file mode 100644 index 000000000..ac2547f21 --- /dev/null +++ b/usage-guide/index.html @@ -0,0 +1,2091 @@ + + + + + + + + + + + + + + + + + + + + + + + Usage guide - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Usage guide

+

This page provides a detailed guide on how to use Qodo Merge. +It includes information on how to adjust Qodo Merge configurations, define which tools will run automatically, and other advanced configurations.

+ + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/usage-guide/introduction/index.html b/usage-guide/introduction/index.html new file mode 100644 index 000000000..716787875 --- /dev/null +++ b/usage-guide/introduction/index.html @@ -0,0 +1,2074 @@ + + + + + + + + + + + + + + + + + + + + + + + Introduction - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Introduction

+ +

After installation, there are three basic ways to invoke Qodo Merge:

+
    +
  1. Locally running a CLI command
  2. +
  3. Online usage - by commenting on a PR
  4. +
  5. Enabling Qodo Merge tools to run automatically when a new PR is opened
  6. +
+

Specifically, CLI commands can be issued by invoking a pre-built docker image, or by invoking a locally cloned repo.

+

For online usage, you will need to set up either a GitHub App or a GitHub Action (GitHub), a GitLab webhook (GitLab), or a BitBucket App (BitBucket). +These platforms also enable running specific Qodo Merge tools automatically when a new PR is opened, or on each push to a branch.

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/usage-guide/mail_notifications/index.html b/usage-guide/mail_notifications/index.html new file mode 100644 index 000000000..f4e0be144 --- /dev/null +++ b/usage-guide/mail_notifications/index.html @@ -0,0 +1,2076 @@ + + + + + + + + + + + + + + + + + + + + + + + Managing Mail Notifications - Qodo Merge (formerly known as PR-Agent) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Managing Mail Notifications

+ +

Unfortunately, it is not possible in GitHub to disable mail notifications from a specific user. +If you are subscribed to notifications for a repo with Qodo Merge, we recommend turning off notifications for PR comments, to avoid lengthy emails:

+

notifications

+

As an alternative, you can filter the notifications coming specifically from the Qodo Merge bot in your mail provider; see how.

+

filter_mail_notifications

+

Another option to reduce the mail overload, yet still receive notifications on Qodo Merge tools, is to disable the help collapsible section in Qodo Merge bot comments. +This can be done by setting enable_help_text=false for the relevant tool in the configuration file. +For example, to disable the help text for the pr_reviewer tool, set: +

[pr_reviewer]
+enable_help_text = false
+

+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + + + + +Footer + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file