From 06cdb2fcfa078bd63b98bcd83d6619efb024ad49 Mon Sep 17 00:00:00 2001 From: <> Date: Mon, 17 Jul 2023 08:06:02 +0000 Subject: [PATCH] Deployed d459bea with MkDocs version: 1.4.3 --- .nojekyll | 0 404.html | 839 +++ __pycache__/gen_ref_pages.cpython-38.pyc | Bin 0 -> 1996 bytes assets/_mkdocstrings.css | 64 + assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.220ee61c.min.js | 29 + assets/javascripts/bundle.220ee61c.min.js.map | 8 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + 
assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.74e28a9f.min.js | 42 + .../workers/search.74e28a9f.min.js.map | 8 + assets/stylesheets/main.26e3688c.min.css | 1 + assets/stylesheets/main.26e3688c.min.css.map | 1 + assets/stylesheets/palette.ecc896b0.min.css | 1 + .../stylesheets/palette.ecc896b0.min.css.map | 1 + en/__pycache__/gen_ref_pages.cpython-38.pyc | Bin 0 -> 1996 bytes en/gen_ref_pages.py | 59 + en/how_to_guides/write_a_new_model/index.html | 927 +++ en/index.html | 1163 +++ en/installation/index.html | 960 +++ en/modelzoo/index.html | 1239 +++ en/notes/changelog/index.html | 927 +++ en/notes/code_of_conduct/index.html | 927 +++ en/notes/contributing/index.html | 1192 +++ en/notes/faq/index.html | 910 +++ en/reference/data/index.html | 973 +++ en/reference/loss/index.html | 888 +++ en/reference/models/index.html | 1416 ++++ en/tutorials/configuration/index.html | 1145 +++ en/tutorials/deployment/index.html | 1277 ++++ en/tutorials/finetune/index.html | 1080 +++ en/tutorials/modelarts/index.html | 926 +++ en/tutorials/quick_start/index.html | 1108 +++ gen_ref_pages.py | 59 + how_to_guides/write_a_new_model/index.html | 927 +++ index.html | 1163 +++ installation/index.html | 960 +++ modelzoo/index.html | 1239 +++ notes/changelog/index.html | 927 +++ notes/code_of_conduct/index.html | 927 +++ notes/contributing/index.html | 1192 +++ notes/faq/index.html | 910 +++ objects.inv | Bin 0 -> 201 bytes reference/data/index.html | 973 +++ reference/loss/index.html | 888 +++ reference/models/index.html | 1416 ++++ search/search_index.json | 1 + sitemap.xml | 131 + sitemap.xml.gz | Bin 0 -> 584 bytes tutorials/configuration/index.html | 1145 +++ tutorials/deployment/index.html | 1277 ++++ tutorials/finetune/index.html | 1080 +++ tutorials/modelarts/index.html | 926 +++ tutorials/quick_start/index.html | 1108 +++ zh/__pycache__/gen_ref_pages.cpython-38.pyc | Bin 0 -> 1996 bytes zh/gen_ref_pages.py | 59 + 
zh/how_to_guides/write_a_new_model/index.html | 1336 ++++ zh/index.html | 1164 +++ zh/installation/index.html | 960 +++ zh/modelzoo/index.html | 1239 +++ zh/notes/changelog/index.html | 927 +++ zh/notes/code_of_conduct/index.html | 927 +++ zh/notes/contributing/index.html | 1192 +++ zh/notes/faq/index.html | 910 +++ zh/reference/data/index.html | 973 +++ zh/reference/loss/index.html | 888 +++ zh/reference/models/index.html | 1416 ++++ zh/tutorials/configuration/index.html | 1145 +++ zh/tutorials/deployment/index.html | 1277 ++++ zh/tutorials/finetune/index.html | 1080 +++ zh/tutorials/modelarts/index.html | 926 +++ zh/tutorials/quick_start/index.html | 1106 +++ 102 files changed, 60084 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 __pycache__/gen_ref_pages.cpython-38.pyc create mode 100644 assets/_mkdocstrings.css create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.220ee61c.min.js create mode 100644 assets/javascripts/bundle.220ee61c.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 
assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.74e28a9f.min.js create mode 100644 assets/javascripts/workers/search.74e28a9f.min.js.map create mode 100644 assets/stylesheets/main.26e3688c.min.css create mode 100644 assets/stylesheets/main.26e3688c.min.css.map create mode 100644 assets/stylesheets/palette.ecc896b0.min.css create mode 100644 assets/stylesheets/palette.ecc896b0.min.css.map create mode 100644 en/__pycache__/gen_ref_pages.cpython-38.pyc create mode 100644 en/gen_ref_pages.py create mode 100644 en/how_to_guides/write_a_new_model/index.html create mode 100644 en/index.html create mode 100644 en/installation/index.html create mode 100644 en/modelzoo/index.html create mode 100644 en/notes/changelog/index.html create mode 100644 en/notes/code_of_conduct/index.html create mode 100644 en/notes/contributing/index.html create mode 100644 en/notes/faq/index.html create mode 100644 en/reference/data/index.html create mode 100644 
en/reference/loss/index.html create mode 100644 en/reference/models/index.html create mode 100644 en/tutorials/configuration/index.html create mode 100644 en/tutorials/deployment/index.html create mode 100644 en/tutorials/finetune/index.html create mode 100644 en/tutorials/modelarts/index.html create mode 100644 en/tutorials/quick_start/index.html create mode 100644 gen_ref_pages.py create mode 100644 how_to_guides/write_a_new_model/index.html create mode 100644 index.html create mode 100644 installation/index.html create mode 100644 modelzoo/index.html create mode 100644 notes/changelog/index.html create mode 100644 notes/code_of_conduct/index.html create mode 100644 notes/contributing/index.html create mode 100644 notes/faq/index.html create mode 100644 objects.inv create mode 100644 reference/data/index.html create mode 100644 reference/loss/index.html create mode 100644 reference/models/index.html create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz create mode 100644 tutorials/configuration/index.html create mode 100644 tutorials/deployment/index.html create mode 100644 tutorials/finetune/index.html create mode 100644 tutorials/modelarts/index.html create mode 100644 tutorials/quick_start/index.html create mode 100644 zh/__pycache__/gen_ref_pages.cpython-38.pyc create mode 100644 zh/gen_ref_pages.py create mode 100644 zh/how_to_guides/write_a_new_model/index.html create mode 100644 zh/index.html create mode 100644 zh/installation/index.html create mode 100644 zh/modelzoo/index.html create mode 100644 zh/notes/changelog/index.html create mode 100644 zh/notes/code_of_conduct/index.html create mode 100644 zh/notes/contributing/index.html create mode 100644 zh/notes/faq/index.html create mode 100644 zh/reference/data/index.html create mode 100644 zh/reference/loss/index.html create mode 100644 zh/reference/models/index.html create mode 100644 zh/tutorials/configuration/index.html create mode 100644 
zh/tutorials/deployment/index.html create mode 100644 zh/tutorials/finetune/index.html create mode 100644 zh/tutorials/modelarts/index.html create mode 100644 zh/tutorials/quick_start/index.html diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..0d689c36 --- /dev/null +++ b/404.html @@ -0,0 +1,839 @@ + + + + + + + + + + + + + + + + + + MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/__pycache__/gen_ref_pages.cpython-38.pyc b/__pycache__/gen_ref_pages.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4260e700aec82bb40572a853fce0a3bac798332f GIT binary patch literal 1996 zcmahJO^+Kj)b@N&lKpB~SST=J1yV{$Ky43Q0YX|35`q;ULM`rE~9;e%k zCR~;)qDQ2vQucz>dw-3uTdq0t|$$j2{v&Ead1?LK1;j3`2a`p{rua%cR6`7E+5`h|upqFzINYNKk zWIYj#Z_Su8xm7W4|IC15a_0%yJ%cTH z(>tbfoBJp9n0`!t_+w^N#@ykJ6FM`MTbcVnpIMc~n_Fb=^VSJ1IAAMVgs&bj`RgHK zK;vr%w*2i3TL|dpp>b0<)v0m6zdWJJ{LT2}ViAr+J!pmOr3u)Q@{Am(xzK7K&z{ zP2D(3)G#V0{eE&=n<>}?jImUO_6vn4(zUstCz}KvG0wtX0KBi|$b_54w-zdYTN~Isw6T5qJ$=hwg!q_9DDO zbQNAIITt_-l-htzv%Ju@6r=o>Xfyc+-f!cE z-db#LBbygtVW9D-W?{p&FkW^6>Y*Xaf%$~1Hfj?9MWCTIp%v1K9YY&9G_ln0)4ev+ zc7!o5YS*+4?ey}|D9#|)cp}wwq0cj__)r>kqD{tE;PnC>NT^FCVm(Bb1KarXG$Eh~ zd8~>BVs{`o3Cu!iZ3X&T9Q0EY~p7V=Gm?05|2 z#N`D<94s`5l6EkOp)wW)=naJWye`2-aB5{RVQP(P+a!MTL||&a)X{O@UzEa!XmRlp WY$iCwWlaVp:first-child { + display: inline; +} + +/* Max width for docstring sections tables. */ +.doc .md-typeset__table, +.doc .md-typeset__table table { + display: table !important; + width: 100%; +} + +.doc .md-typeset__table tr { + display: table-row; +} + +/* Defaults in Spacy table style. */ +.doc-param-default { + float: right; +} + +/* Keep headings consistent. 
*/ +h1.doc-heading, +h2.doc-heading, +h3.doc-heading, +h4.doc-heading, +h5.doc-heading, +h6.doc-heading { + font-weight: 400; + line-height: 1.5; + color: inherit; + text-transform: none; +} + +h1.doc-heading { + font-size: 1.6rem; +} + +h2.doc-heading { + font-size: 1.2rem; +} + +h3.doc-heading { + font-size: 1.15rem; +} + +h4.doc-heading { + font-size: 1.10rem; +} + +h5.doc-heading { + font-size: 1.05rem; +} + +h6.doc-heading { + font-size: 1rem; +} \ No newline at end of file diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..1cf13b9f9d978896599290a74f77d5dbe7d1655c GIT binary patch literal 1870 zcmV-U2eJ5xP)Gc)JR9QMau)O=X#!i9;T z37kk-upj^(fsR36MHs_+1RCI)NNu9}lD0S{B^g8PN?Ww(5|~L#Ng*g{WsqleV}|#l zz8@ri&cTzw_h33bHI+12+kK6WN$h#n5cD8OQt`5kw6p~9H3()bUQ8OS4Q4HTQ=1Ol z_JAocz`fLbT2^{`8n~UAo=#AUOf=SOq4pYkt;XbC&f#7lb$*7=$na!mWCQ`dBQsO0 zLFBSPj*N?#u5&pf2t4XjEGH|=pPQ8xh7tpx;US5Cx_Ju;!O`ya-yF`)b%TEt5>eP1ZX~}sjjA%FJF?h7cX8=b!DZl<6%Cv z*G0uvvU+vmnpLZ2paivG-(cd*y3$hCIcsZcYOGh{$&)A6*XX&kXZd3G8m)G$Zz-LV z^GF3VAW^Mdv!)4OM8EgqRiz~*Cji;uzl2uC9^=8I84vNp;ltJ|q-*uQwGp2ma6cY7 z;`%`!9UXO@fr&Ebapfs34OmS9^u6$)bJxrucutf>`dKPKT%%*d3XlFVKunp9 zasduxjrjs>f8V=D|J=XNZp;_Zy^WgQ$9WDjgY=z@stwiEBm9u5*|34&1Na8BMjjgf3+SHcr`5~>oz1Y?SW^=K z^bTyO6>Gar#P_W2gEMwq)ot3; zREHn~U&Dp0l6YT0&k-wLwYjb?5zGK`W6S2v+K>AM(95m2C20L|3m~rN8dprPr@t)5lsk9Hu*W z?pS990s;Ez=+Rj{x7p``4>+c0G5^pYnB1^!TL=(?HLHZ+HicG{~4F1d^5Awl_2!1jICM-!9eoLhbbT^;yHcefyTAaqRcY zmuctDopPT!%k+}x%lZRKnzykr2}}XfG_ne?nRQO~?%hkzo;@RN{P6o`&mMUWBYMTe z6i8ChtjX&gXl`nvrU>jah)2iNM%JdjqoaeaU%yVn!^70x-flljp6Q5tK}5}&X8&&G zX3fpb3E(!rH=zVI_9Gjl45w@{(ITqngWFe7@9{mX;tO25Z_8 zQHEpI+FkTU#4xu>RkN>b3Tnc3UpWzPXWm#o55GKF09j^Mh~)K7{QqbO_~(@CVq! 
zS<8954|P8mXN2MRs86xZ&Q4EfM@JB94b=(YGuk)s&^jiSF=t3*oNK3`rD{H`yQ?d; ztE=laAUoZx5?RC8*WKOj`%LXEkgDd>&^Q4M^z`%u0rg-It=hLCVsq!Z%^6eB-OvOT zFZ28TN&cRmgU}Elrnk43)!>Z1FCPL2K$7}gwzIc48NX}#!A1BpJP?#v5wkNprhV** z?Cpalt1oH&{r!o3eSKc&ap)iz2BTn_VV`4>9M^b3;(YY}4>#ML6{~(4mH+?%07*qo IM6N<$f(jP3KmY&$ literal 0 HcmV?d00001 diff --git a/assets/javascripts/bundle.220ee61c.min.js b/assets/javascripts/bundle.220ee61c.min.js new file mode 100644 index 00000000..116072a1 --- /dev/null +++ b/assets/javascripts/bundle.220ee61c.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Ci=Object.create;var gr=Object.defineProperty;var Ri=Object.getOwnPropertyDescriptor;var ki=Object.getOwnPropertyNames,Ht=Object.getOwnPropertySymbols,Hi=Object.getPrototypeOf,yr=Object.prototype.hasOwnProperty,nn=Object.prototype.propertyIsEnumerable;var rn=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,P=(e,t)=>{for(var r in t||(t={}))yr.call(t,r)&&rn(e,r,t[r]);if(Ht)for(var r of Ht(t))nn.call(t,r)&&rn(e,r,t[r]);return e};var on=(e,t)=>{var r={};for(var n in e)yr.call(e,n)&&t.indexOf(n)<0&&(r[n]=e[n]);if(e!=null&&Ht)for(var n of Ht(e))t.indexOf(n)<0&&nn.call(e,n)&&(r[n]=e[n]);return r};var Pt=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Pi=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of ki(t))!yr.call(e,o)&&o!==r&&gr(e,o,{get:()=>t[o],enumerable:!(n=Ri(t,o))||n.enumerable});return e};var yt=(e,t,r)=>(r=e!=null?Ci(Hi(e)):{},Pi(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var sn=Pt((xr,an)=>{(function(e,t){typeof xr=="object"&&typeof an!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(xr,function(){"use strict";function e(r){var n=!0,o=!1,i=null,s={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function a(O){return!!(O&&O!==document&&O.nodeName!=="HTML"&&O.nodeName!=="BODY"&&"classList"in O&&"contains"in 
O.classList)}function f(O){var Qe=O.type,De=O.tagName;return!!(De==="INPUT"&&s[Qe]&&!O.readOnly||De==="TEXTAREA"&&!O.readOnly||O.isContentEditable)}function c(O){O.classList.contains("focus-visible")||(O.classList.add("focus-visible"),O.setAttribute("data-focus-visible-added",""))}function u(O){O.hasAttribute("data-focus-visible-added")&&(O.classList.remove("focus-visible"),O.removeAttribute("data-focus-visible-added"))}function p(O){O.metaKey||O.altKey||O.ctrlKey||(a(r.activeElement)&&c(r.activeElement),n=!0)}function m(O){n=!1}function d(O){a(O.target)&&(n||f(O.target))&&c(O.target)}function h(O){a(O.target)&&(O.target.classList.contains("focus-visible")||O.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),u(O.target))}function v(O){document.visibilityState==="hidden"&&(o&&(n=!0),Y())}function Y(){document.addEventListener("mousemove",N),document.addEventListener("mousedown",N),document.addEventListener("mouseup",N),document.addEventListener("pointermove",N),document.addEventListener("pointerdown",N),document.addEventListener("pointerup",N),document.addEventListener("touchmove",N),document.addEventListener("touchstart",N),document.addEventListener("touchend",N)}function B(){document.removeEventListener("mousemove",N),document.removeEventListener("mousedown",N),document.removeEventListener("mouseup",N),document.removeEventListener("pointermove",N),document.removeEventListener("pointerdown",N),document.removeEventListener("pointerup",N),document.removeEventListener("touchmove",N),document.removeEventListener("touchstart",N),document.removeEventListener("touchend",N)}function 
N(O){O.target.nodeName&&O.target.nodeName.toLowerCase()==="html"||(n=!1,B())}document.addEventListener("keydown",p,!0),document.addEventListener("mousedown",m,!0),document.addEventListener("pointerdown",m,!0),document.addEventListener("touchstart",m,!0),document.addEventListener("visibilitychange",v,!0),Y(),r.addEventListener("focus",d,!0),r.addEventListener("blur",h,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var cn=Pt(Er=>{(function(e){var t=function(){try{return!!Symbol.iterator}catch(c){return!1}},r=t(),n=function(c){var u={next:function(){var p=c.shift();return{done:p===void 0,value:p}}};return r&&(u[Symbol.iterator]=function(){return u}),u},o=function(c){return encodeURIComponent(c).replace(/%20/g,"+")},i=function(c){return decodeURIComponent(String(c).replace(/\+/g," "))},s=function(){var c=function(p){Object.defineProperty(this,"_entries",{writable:!0,value:{}});var m=typeof p;if(m!=="undefined")if(m==="string")p!==""&&this._fromString(p);else if(p instanceof c){var d=this;p.forEach(function(B,N){d.append(N,B)})}else if(p!==null&&m==="object")if(Object.prototype.toString.call(p)==="[object Array]")for(var h=0;hd[0]?1:0}),c._entries&&(c._entries={});for(var p=0;p1?i(d[1]):"")}})})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Er);(function(e){var t=function(){try{var o=new e.URL("b","http://a");return o.pathname="c 
d",o.href==="http://a/c%20d"&&o.searchParams}catch(i){return!1}},r=function(){var o=e.URL,i=function(f,c){typeof f!="string"&&(f=String(f)),c&&typeof c!="string"&&(c=String(c));var u=document,p;if(c&&(e.location===void 0||c!==e.location.href)){c=c.toLowerCase(),u=document.implementation.createHTMLDocument(""),p=u.createElement("base"),p.href=c,u.head.appendChild(p);try{if(p.href.indexOf(c)!==0)throw new Error(p.href)}catch(O){throw new Error("URL unable to set base "+c+" due to "+O)}}var m=u.createElement("a");m.href=f,p&&(u.body.appendChild(m),m.href=m.href);var d=u.createElement("input");if(d.type="url",d.value=f,m.protocol===":"||!/:/.test(m.href)||!d.checkValidity()&&!c)throw new TypeError("Invalid URL");Object.defineProperty(this,"_anchorElement",{value:m});var h=new e.URLSearchParams(this.search),v=!0,Y=!0,B=this;["append","delete","set"].forEach(function(O){var Qe=h[O];h[O]=function(){Qe.apply(h,arguments),v&&(Y=!1,B.search=h.toString(),Y=!0)}}),Object.defineProperty(this,"searchParams",{value:h,enumerable:!0});var N=void 0;Object.defineProperty(this,"_updateSearchParams",{enumerable:!1,configurable:!1,writable:!1,value:function(){this.search!==N&&(N=this.search,Y&&(v=!1,this.searchParams._fromString(this.search),v=!0))}})},s=i.prototype,a=function(f){Object.defineProperty(s,f,{get:function(){return this._anchorElement[f]},set:function(c){this._anchorElement[f]=c},enumerable:!0})};["hash","host","hostname","port","protocol"].forEach(function(f){a(f)}),Object.defineProperty(s,"search",{get:function(){return this._anchorElement.search},set:function(f){this._anchorElement.search=f,this._updateSearchParams()},enumerable:!0}),Object.defineProperties(s,{toString:{get:function(){var f=this;return function(){return f.href}}},href:{get:function(){return this._anchorElement.href.replace(/\?$/,"")},set:function(f){this._anchorElement.href=f,this._updateSearchParams()},enumerable:!0},pathname:{get:function(){return 
this._anchorElement.pathname.replace(/(^\/?)/,"/")},set:function(f){this._anchorElement.pathname=f},enumerable:!0},origin:{get:function(){var f={"http:":80,"https:":443,"ftp:":21}[this._anchorElement.protocol],c=this._anchorElement.port!=f&&this._anchorElement.port!=="";return this._anchorElement.protocol+"//"+this._anchorElement.hostname+(c?":"+this._anchorElement.port:"")},enumerable:!0},password:{get:function(){return""},set:function(f){},enumerable:!0},username:{get:function(){return""},set:function(f){},enumerable:!0}}),i.createObjectURL=function(f){return o.createObjectURL.apply(o,arguments)},i.revokeObjectURL=function(f){return o.revokeObjectURL.apply(o,arguments)},e.URL=i};if(t()||r(),e.location!==void 0&&!("origin"in e.location)){var n=function(){return e.location.protocol+"//"+e.location.hostname+(e.location.port?":"+e.location.port:"")};try{Object.defineProperty(e.location,"origin",{get:n,enumerable:!0})}catch(o){setInterval(function(){e.location.origin=n()},100)}}})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Er)});var qr=Pt((Mt,Nr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Mt=="object"&&typeof Nr=="object"?Nr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Mt=="object"?Mt.ClipboardJS=r():t.ClipboardJS=r()})(Mt,function(){return function(){var e={686:function(n,o,i){"use strict";i.d(o,{default:function(){return Ai}});var s=i(279),a=i.n(s),f=i(370),c=i.n(f),u=i(817),p=i.n(u);function m(j){try{return document.execCommand(j)}catch(T){return!1}}var d=function(T){var E=p()(T);return m("cut"),E},h=d;function v(j){var T=document.documentElement.getAttribute("dir")==="rtl",E=document.createElement("textarea");E.style.fontSize="12pt",E.style.border="0",E.style.padding="0",E.style.margin="0",E.style.position="absolute",E.style[T?"right":"left"]="-9999px";var H=window.pageYOffset||document.documentElement.scrollTop;return E.style.top="".concat(H,"px"),E.setAttribute("readonly",""),E.value=j,E}var Y=function(T,E){var H=v(T);E.container.appendChild(H);var I=p()(H);return m("copy"),H.remove(),I},B=function(T){var E=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},H="";return typeof T=="string"?H=Y(T,E):T instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(T==null?void 0:T.type)?H=Y(T.value,E):(H=p()(T),m("copy")),H},N=B;function O(j){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?O=function(E){return typeof E}:O=function(E){return E&&typeof Symbol=="function"&&E.constructor===Symbol&&E!==Symbol.prototype?"symbol":typeof E},O(j)}var Qe=function(){var T=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},E=T.action,H=E===void 0?"copy":E,I=T.container,q=T.target,Me=T.text;if(H!=="copy"&&H!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(q!==void 0)if(q&&O(q)==="object"&&q.nodeType===1){if(H==="copy"&&q.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(H==="cut"&&(q.hasAttribute("readonly")||q.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(Me)return N(Me,{container:I});if(q)return H==="cut"?h(q):N(q,{container:I})},De=Qe;function $e(j){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?$e=function(E){return typeof E}:$e=function(E){return E&&typeof Symbol=="function"&&E.constructor===Symbol&&E!==Symbol.prototype?"symbol":typeof E},$e(j)}function Ei(j,T){if(!(j instanceof T))throw new TypeError("Cannot call a class as a function")}function tn(j,T){for(var E=0;E0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof I.action=="function"?I.action:this.defaultAction,this.target=typeof I.target=="function"?I.target:this.defaultTarget,this.text=typeof I.text=="function"?I.text:this.defaultText,this.container=$e(I.container)==="object"?I.container:document.body}},{key:"listenClick",value:function(I){var q=this;this.listener=c()(I,"click",function(Me){return q.onClick(Me)})}},{key:"onClick",value:function(I){var q=I.delegateTarget||I.currentTarget,Me=this.action(q)||"copy",kt=De({action:Me,container:this.container,target:this.target(q),text:this.text(q)});this.emit(kt?"success":"error",{action:Me,text:kt,trigger:q,clearSelection:function(){q&&q.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(I){return vr("action",I)}},{key:"defaultTarget",value:function(I){var q=vr("target",I);if(q)return document.querySelector(q)}},{key:"defaultText",value:function(I){return vr("text",I)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(I){var q=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return N(I,q)}},{key:"cut",value:function(I){return 
h(I)}},{key:"isSupported",value:function(){var I=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],q=typeof I=="string"?[I]:I,Me=!!document.queryCommandSupported;return q.forEach(function(kt){Me=Me&&!!document.queryCommandSupported(kt)}),Me}}]),E}(a()),Ai=Li},828:function(n){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function s(a,f){for(;a&&a.nodeType!==o;){if(typeof a.matches=="function"&&a.matches(f))return a;a=a.parentNode}}n.exports=s},438:function(n,o,i){var s=i(828);function a(u,p,m,d,h){var v=c.apply(this,arguments);return u.addEventListener(m,v,h),{destroy:function(){u.removeEventListener(m,v,h)}}}function f(u,p,m,d,h){return typeof u.addEventListener=="function"?a.apply(null,arguments):typeof m=="function"?a.bind(null,document).apply(null,arguments):(typeof u=="string"&&(u=document.querySelectorAll(u)),Array.prototype.map.call(u,function(v){return a(v,p,m,d,h)}))}function c(u,p,m,d){return function(h){h.delegateTarget=s(h.target,p),h.delegateTarget&&d.call(u,h)}}n.exports=f},879:function(n,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var s=Object.prototype.toString.call(i);return i!==void 0&&(s==="[object NodeList]"||s==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var s=Object.prototype.toString.call(i);return s==="[object Function]"}},370:function(n,o,i){var s=i(879),a=i(438);function f(m,d,h){if(!m&&!d&&!h)throw new Error("Missing required arguments");if(!s.string(d))throw new TypeError("Second argument must be a String");if(!s.fn(h))throw new TypeError("Third argument must be a Function");if(s.node(m))return c(m,d,h);if(s.nodeList(m))return u(m,d,h);if(s.string(m))return p(m,d,h);throw new 
TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(m,d,h){return m.addEventListener(d,h),{destroy:function(){m.removeEventListener(d,h)}}}function u(m,d,h){return Array.prototype.forEach.call(m,function(v){v.addEventListener(d,h)}),{destroy:function(){Array.prototype.forEach.call(m,function(v){v.removeEventListener(d,h)})}}}function p(m,d,h){return a(document.body,m,d,h)}n.exports=f},817:function(n){function o(i){var s;if(i.nodeName==="SELECT")i.focus(),s=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var a=i.hasAttribute("readonly");a||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),a||i.removeAttribute("readonly"),s=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var f=window.getSelection(),c=document.createRange();c.selectNodeContents(i),f.removeAllRanges(),f.addRange(c),s=f.toString()}return s}n.exports=o},279:function(n){function o(){}o.prototype={on:function(i,s,a){var f=this.e||(this.e={});return(f[i]||(f[i]=[])).push({fn:s,ctx:a}),this},once:function(i,s,a){var f=this;function c(){f.off(i,c),s.apply(a,arguments)}return c._=s,this.on(i,c,a)},emit:function(i){var s=[].slice.call(arguments,1),a=((this.e||(this.e={}))[i]||[]).slice(),f=0,c=a.length;for(f;f{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var rs=/["'&<>]/;Yo.exports=ns;function ns(e){var t=""+e,r=rs.exec(t);if(!r)return t;var n,o="",i=0,s=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function W(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var n=r.call(e),o,i=[],s;try{for(;(t===void 0||t-- >0)&&!(o=n.next()).done;)i.push(o.value)}catch(a){s={error:a}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(s)throw s.error}}return i}function D(e,t,r){if(r||arguments.length===2)for(var n=0,o=t.length,i;n1||a(m,d)})})}function a(m,d){try{f(n[m](d))}catch(h){p(i[0][3],h)}}function f(m){m.value instanceof et?Promise.resolve(m.value.v).then(c,u):p(i[0][2],m)}function c(m){a("next",m)}function u(m){a("throw",m)}function p(m,d){m(d),i.shift(),i.length&&a(i[0][0],i[0][1])}}function pn(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof Ee=="function"?Ee(e):e[Symbol.iterator](),r={},n("next"),n("throw"),n("return"),r[Symbol.asyncIterator]=function(){return this},r);function n(i){r[i]=e[i]&&function(s){return new Promise(function(a,f){s=e[i](s),o(a,f,s.done,s.value)})}}function o(i,s,a,f){Promise.resolve(f).then(function(c){i({value:c,done:a})},s)}}function C(e){return typeof e=="function"}function at(e){var t=function(n){Error.call(n),n.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var It=at(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(n,o){return o+1+") "+n.toString()}).join(` + 
`):"",this.name="UnsubscriptionError",this.errors=r}});function Ve(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Ie=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,n,o,i;if(!this.closed){this.closed=!0;var s=this._parentage;if(s)if(this._parentage=null,Array.isArray(s))try{for(var a=Ee(s),f=a.next();!f.done;f=a.next()){var c=f.value;c.remove(this)}}catch(v){t={error:v}}finally{try{f&&!f.done&&(r=a.return)&&r.call(a)}finally{if(t)throw t.error}}else s.remove(this);var u=this.initialTeardown;if(C(u))try{u()}catch(v){i=v instanceof It?v.errors:[v]}var p=this._finalizers;if(p){this._finalizers=null;try{for(var m=Ee(p),d=m.next();!d.done;d=m.next()){var h=d.value;try{ln(h)}catch(v){i=i!=null?i:[],v instanceof It?i=D(D([],W(i)),W(v.errors)):i.push(v)}}}catch(v){n={error:v}}finally{try{d&&!d.done&&(o=m.return)&&o.call(m)}finally{if(n)throw n.error}}}if(i)throw new It(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)ln(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Ve(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Ve(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Sr=Ie.EMPTY;function jt(e){return e instanceof Ie||e&&"closed"in e&&C(e.remove)&&C(e.add)&&C(e.unsubscribe)}function ln(e){C(e)?e():e.unsubscribe()}var Le={onUnhandledError:null,onStoppedNotification:null,Promise:void 
0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var st={setTimeout:function(e,t){for(var r=[],n=2;n0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var n=this,o=this,i=o.hasError,s=o.isStopped,a=o.observers;return i||s?Sr:(this.currentObservers=null,a.push(r),new Ie(function(){n.currentObservers=null,Ve(a,r)}))},t.prototype._checkFinalizedStatuses=function(r){var n=this,o=n.hasError,i=n.thrownError,s=n.isStopped;o?r.error(i):s&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,n){return new xn(r,n)},t}(F);var xn=function(e){ie(t,e);function t(r,n){var o=e.call(this)||this;return o.destination=r,o.source=n,o}return t.prototype.next=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.next)===null||o===void 0||o.call(n,r)},t.prototype.error=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.error)===null||o===void 0||o.call(n,r)},t.prototype.complete=function(){var r,n;(n=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||n===void 0||n.call(r)},t.prototype._subscribe=function(r){var n,o;return(o=(n=this.source)===null||n===void 0?void 0:n.subscribe(r))!==null&&o!==void 0?o:Sr},t}(x);var Et={now:function(){return(Et.delegate||Date).now()},delegate:void 0};var wt=function(e){ie(t,e);function t(r,n,o){r===void 0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=Et);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return t.prototype.next=function(r){var 
n=this,o=n.isStopped,i=n._buffer,s=n._infiniteTimeWindow,a=n._timestampProvider,f=n._windowTime;o||(i.push(r),!s&&i.push(a.now()+f)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,s=o._buffer,a=s.slice(),f=0;f0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){var i;if(o===void 0&&(o=0),o!=null?o>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);var s=r.actions;n!=null&&((i=s[s.length-1])===null||i===void 0?void 0:i.id)!==n&&(ut.cancelAnimationFrame(n),r._scheduled=void 0)},t}(Wt);var Sn=function(e){ie(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Dt);var Oe=new Sn(wn);var M=new F(function(e){return e.complete()});function Vt(e){return e&&C(e.schedule)}function Cr(e){return e[e.length-1]}function Ye(e){return C(Cr(e))?e.pop():void 0}function Te(e){return Vt(Cr(e))?e.pop():void 0}function zt(e,t){return typeof Cr(e)=="number"?e.pop():t}var pt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Nt(e){return C(e==null?void 0:e.then)}function qt(e){return C(e[ft])}function Kt(e){return Symbol.asyncIterator&&C(e==null?void 0:e[Symbol.asyncIterator])}function Qt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function zi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Yt=zi();function Gt(e){return C(e==null?void 0:e[Yt])}function Bt(e){return un(this,arguments,function(){var r,n,o,i;return $t(this,function(s){switch(s.label){case 0:r=e.getReader(),s.label=1;case 1:s.trys.push([1,,9,10]),s.label=2;case 2:return[4,et(r.read())];case 3:return n=s.sent(),o=n.value,i=n.done,i?[4,et(void 0)]:[3,5];case 4:return[2,s.sent()];case 5:return[4,et(o)];case 6:return[4,s.sent()];case 7:return s.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Jt(e){return C(e==null?void 0:e.getReader)}function U(e){if(e instanceof F)return e;if(e!=null){if(qt(e))return Ni(e);if(pt(e))return qi(e);if(Nt(e))return Ki(e);if(Kt(e))return On(e);if(Gt(e))return Qi(e);if(Jt(e))return Yi(e)}throw Qt(e)}function Ni(e){return new F(function(t){var r=e[ft]();if(C(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function qi(e){return new F(function(t){for(var r=0;r=2;return function(n){return n.pipe(e?A(function(o,i){return e(o,i,n)}):de,ge(1),r?He(t):Dn(function(){return new Zt}))}}function Vn(){for(var e=[],t=0;t=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new x}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,s=i===void 0?!0:i,a=e.resetOnRefCountZero,f=a===void 0?!0:a;return function(c){var u,p,m,d=0,h=!1,v=!1,Y=function(){p==null||p.unsubscribe(),p=void 0},B=function(){Y(),u=m=void 0,h=v=!1},N=function(){var O=u;B(),O==null||O.unsubscribe()};return y(function(O,Qe){d++,!v&&!h&&Y();var De=m=m!=null?m:r();Qe.add(function(){d--,d===0&&!v&&!h&&(p=$r(N,f))}),De.subscribe(Qe),!u&&d>0&&(u=new rt({next:function($e){return 
De.next($e)},error:function($e){v=!0,Y(),p=$r(B,o,$e),De.error($e)},complete:function(){h=!0,Y(),p=$r(B,s),De.complete()}}),U(O).subscribe(u))})(c)}}function $r(e,t){for(var r=[],n=2;ne.next(document)),e}function K(e,t=document){return Array.from(t.querySelectorAll(e))}function z(e,t=document){let r=ce(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function ce(e,t=document){return t.querySelector(e)||void 0}function _e(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function tr(e){return L(b(document.body,"focusin"),b(document.body,"focusout")).pipe(ke(1),l(()=>{let t=_e();return typeof t!="undefined"?e.contains(t):!1}),V(e===_e()),J())}function Xe(e){return{x:e.offsetLeft,y:e.offsetTop}}function Kn(e){return L(b(window,"load"),b(window,"resize")).pipe(Ce(0,Oe),l(()=>Xe(e)),V(Xe(e)))}function rr(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return L(b(e,"scroll"),b(window,"resize")).pipe(Ce(0,Oe),l(()=>rr(e)),V(rr(e)))}var Yn=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var 
o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!Wr||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),va?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!Wr||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=ba.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),Gn=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),Jn=typeof WeakMap!="undefined"?new WeakMap:new Yn,Xn=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=ga.getInstance(),n=new La(t,r,this);Jn.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){Xn.prototype[e]=function(){var t;return(t=Jn.get(this))[e].apply(t,arguments)}});var Aa=function(){return typeof nr.ResizeObserver!="undefined"?nr.ResizeObserver:Xn}(),Zn=Aa;var eo=new x,Ca=$(()=>k(new Zn(e=>{for(let t of e)eo.next(t)}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),X(1));function he(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ye(e){return 
Ca.pipe(S(t=>t.observe(e)),g(t=>eo.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(()=>he(e)))),V(he(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function ar(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var to=new x,Ra=$(()=>k(new IntersectionObserver(e=>{for(let t of e)to.next(t)},{threshold:0}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),X(1));function sr(e){return Ra.pipe(S(t=>t.observe(e)),g(t=>to.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(({isIntersecting:r})=>r))))}function ro(e,t=16){return dt(e).pipe(l(({y:r})=>{let n=he(e),o=bt(e);return r>=o.height-n.height-t}),J())}var cr={drawer:z("[data-md-toggle=drawer]"),search:z("[data-md-toggle=search]")};function no(e){return cr[e].checked}function Ke(e,t){cr[e].checked!==t&&cr[e].click()}function Ue(e){let t=cr[e];return b(t,"change").pipe(l(()=>t.checked),V(t.checked))}function ka(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ha(){return L(b(window,"compositionstart").pipe(l(()=>!0)),b(window,"compositionend").pipe(l(()=>!1))).pipe(V(!1))}function oo(){let e=b(window,"keydown").pipe(A(t=>!(t.metaKey||t.ctrlKey)),l(t=>({mode:no("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),A(({mode:t,type:r})=>{if(t==="global"){let n=_e();if(typeof n!="undefined")return!ka(n,r)}return!0}),pe());return Ha().pipe(g(t=>t?M:e))}function le(){return new URL(location.href)}function ot(e){location.href=e.href}function io(){return new x}function ao(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)ao(e,r)}function _(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof 
t[o]!="undefined"&&(typeof t[o]!="boolean"?n.setAttribute(o,t[o]):n.setAttribute(o,""));for(let o of r)ao(n,o);return n}function fr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function so(){return location.hash.substring(1)}function Dr(e){let t=_("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Pa(e){return L(b(window,"hashchange"),e).pipe(l(so),V(so()),A(t=>t.length>0),X(1))}function co(e){return Pa(e).pipe(l(t=>ce(`[id="${t}"]`)),A(t=>typeof t!="undefined"))}function Vr(e){let t=matchMedia(e);return er(r=>t.addListener(()=>r(t.matches))).pipe(V(t.matches))}function fo(){let e=matchMedia("print");return L(b(window,"beforeprint").pipe(l(()=>!0)),b(window,"afterprint").pipe(l(()=>!1))).pipe(V(e.matches))}function zr(e,t){return e.pipe(g(r=>r?t():M))}function ur(e,t={credentials:"same-origin"}){return ue(fetch(`${e}`,t)).pipe(fe(()=>M),g(r=>r.status!==200?Ot(()=>new Error(r.statusText)):k(r)))}function We(e,t){return ur(e,t).pipe(g(r=>r.json()),X(1))}function uo(e,t){let r=new DOMParser;return ur(e,t).pipe(g(n=>n.text()),l(n=>r.parseFromString(n,"text/xml")),X(1))}function pr(e){let t=_("script",{src:e});return $(()=>(document.head.appendChild(t),L(b(t,"load"),b(t,"error").pipe(g(()=>Ot(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(l(()=>{}),R(()=>document.head.removeChild(t)),ge(1))))}function po(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function lo(){return L(b(window,"scroll",{passive:!0}),b(window,"resize",{passive:!0})).pipe(l(po),V(po()))}function mo(){return{width:innerWidth,height:innerHeight}}function ho(){return b(window,"resize",{passive:!0}).pipe(l(mo),V(mo()))}function bo(){return G([lo(),ho()]).pipe(l(([e,t])=>({offset:e,size:t})),X(1))}function lr(e,{viewport$:t,header$:r}){let n=t.pipe(ee("size")),o=G([n,r]).pipe(l(()=>Xe(e)));return 
G([r,t,o]).pipe(l(([{height:i},{offset:s,size:a},{x:f,y:c}])=>({offset:{x:s.x-f,y:s.y-c+i},size:a})))}(()=>{function e(n,o){parent.postMessage(n,o||"*")}function t(...n){return n.reduce((o,i)=>o.then(()=>new Promise(s=>{let a=document.createElement("script");a.src=i,a.onload=s,document.body.appendChild(a)})),Promise.resolve())}var r=class extends EventTarget{constructor(n){super(),this.url=n,this.m=i=>{i.source===this.w&&(this.dispatchEvent(new MessageEvent("message",{data:i.data})),this.onmessage&&this.onmessage(i))},this.e=(i,s,a,f,c)=>{if(s===`${this.url}`){let u=new ErrorEvent("error",{message:i,filename:s,lineno:a,colno:f,error:c});this.dispatchEvent(u),this.onerror&&this.onerror(u)}};let o=document.createElement("iframe");o.hidden=!0,document.body.appendChild(this.iframe=o),this.w.document.open(),this.w.document.write(` + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Write A New Model

+

coming soon.

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/index.html b/en/index.html new file mode 100644 index 00000000..601da3ec --- /dev/null +++ b/en/index.html @@ -0,0 +1,1163 @@ + + + + + + + + + + + + + + + + + + + + + + Home - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + +

MindYOLO

+

+ + docs + + + GitHub + + + PRs Welcome + +

+ +

MindYOLO is MindSpore Lab's software toolbox that implements state-of-the-art YOLO series algorithms, support list and benchmark. It is written in Python and powered by the MindSpore AI framework.

+

The master branch supports MindSpore 2.0.

+

+

What is New

+
    +
  • 2023/06/15
  • +
+
    +
  1. Support YOLOv3/v4/v5/X/v7/v8 6 models and release 23 corresponding weights, see MODEL ZOO for details.
  2. +
  3. Support MindSpore 2.0.
  4. +
  5. Support deployment on MindSpore lite 2.0.
  6. +
  7. New online documents are available!
  8. +
+

Benchmark and Model Zoo

+

See MODEL ZOO.

+
+Supported Algorithms + +
+

Installation

+

See INSTALLATION for details.

+

Getting Started

+

See GETTING STARTED for details.

+

Learn More about MindYOLO

+

To be supplemented.

+

Notes

+

⚠️ The current version is based on the static shape of GRAPH. The dynamic shape of the PYNATIVE will be supported later. Please look forward to it.

+

How to Contribute

+

We appreciate all contributions including issues and PRs to make MindYOLO better.

+

Please refer to CONTRIBUTING.md for the contributing guideline.

+

License

+

MindYOLO is released under the Apache License 2.0.

+

Acknowledgement

+

MindYOLO is an open source project that welcomes any contribution and feedback. We hope that the toolbox and benchmark can serve the growing research community by providing a flexible as well as standardized toolkit to reimplement existing methods and develop their own new real-time object detection methods.

+

Citation

+

If you find this project useful in your research, please consider citing:

+
@misc{MindSpore Object Detection YOLO 2023,
+    title={{MindSpore Object Detection YOLO}:MindSpore Object Detection YOLO Toolbox and Benchmark},
+    author={MindSpore YOLO Contributors},
+    howpublished = {\url{https://github.com/mindspore-lab/mindyolo}},
+    year={2023}
+}
+
+ + + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/installation/index.html b/en/installation/index.html new file mode 100644 index 00000000..b63403a1 --- /dev/null +++ b/en/installation/index.html @@ -0,0 +1,960 @@ + + + + + + + + + + + + + + + + + + + + Installation - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Installation

+

Dependency

+
    +
  • mindspore >= 2.0
  • +
  • numpy >= 1.17.0
  • +
  • pyyaml >= 5.3
  • +
  • openmpi 4.0.3 (for distributed mode)
  • +
+

To install the dependency, please run

+
pip install -r requirements.txt
+
+

MindSpore can be easily installed by following the official instructions, where you can select your hardware platform for the best fit. To run in distributed mode, OpenMPI is required.

+

⚠️ The current version only supports the Ascend platform, and the GPU platform will be supported later.

+

Install with PyPI

+

MindYOLO is published as a Python package and can be installed with pip, ideally by using a virtual environment. Open up a terminal and install MindYOLO with:

+
pip install mindyolo
+
+

Install from Source (Bleeding Edge Version)

+

from VCS

+
pip install git+https://github.com/mindspore-lab/mindyolo.git
+
+

from local src

+

As this project is in active development, if you are a developer or contributor, please prefer this installation!

+

MindYOLO can be directly used from GitHub by cloning the repository into a local folder which might be useful if you want to use the very latest version:

+
git clone https://github.com/mindspore-lab/mindyolo.git
+
+

After cloning from git, it is recommended that you install using "editable" mode, which can help resolve potential module import issues:

+
cd mindyolo
+pip install -e .
+
+

In addition, we provide an optional fast coco api to improve eval speed. The code is provided in C++, and you can try compiling with the following commands (This operation is optional) :

+
cd mindyolo/csrc
+sh build.sh
+
+ + + + + + + + +
+
+ + + + +
+ + + +
+ +
+ + + + +
+ +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/modelzoo/index.html b/en/modelzoo/index.html new file mode 100644 index 00000000..11a4c303 --- /dev/null +++ b/en/modelzoo/index.html @@ -0,0 +1,1239 @@ + + + + + + + + + + + + + + + + + + + + + + + + Benchmark - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Model Zoo

+ +

MindYOLO Model Zoo and Baselines

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameScaleContextImageSizeDatasetBox mAP (%)ParamsFLOPsRecipeDownload
YOLOv8ND910x8-G640MS COCO 201737.23.2M8.7Gyamlweights
YOLOv8SD910x8-G640MS COCO 201744.611.2M28.6Gyamlweights
YOLOv8MD910x8-G640MS COCO 201750.525.9M78.9Gyamlweights
YOLOv8LD910x8-G640MS COCO 201752.843.7M165.2Gyamlweights
YOLOv8XD910x8-G640MS COCO 201753.768.2M257.8Gyamlweights
YOLOv7TinyD910x8-G640MS COCO 201737.56.2M13.8Gyamlweights
YOLOv7LD910x8-G640MS COCO 201750.836.9M104.7Gyamlweights
YOLOv7XD910x8-G640MS COCO 201752.471.3M189.9Gyamlweights
YOLOv5ND910x8-G640MS COCO 201727.31.9M4.5Gyamlweights
YOLOv5SD910x8-G640MS COCO 201737.67.2M16.5Gyamlweights
YOLOv5MD910x8-G640MS COCO 201744.921.2M49.0Gyamlweights
YOLOv5LD910x8-G640MS COCO 201748.546.5M109.1Gyamlweights
YOLOv5XD910x8-G640MS COCO 201750.586.7M205.7Gyamlweights
YOLOv4CSPDarknet53D910x8-G608MS COCO 201745.427.6M52Gyamlweights
YOLOv4CSPDarknet53(silu)D910x8-G608MS COCO 201745.827.6M52Gyamlweights
YOLOv3Darknet53D910x8-G640MS COCO 201745.561.9M156.4Gyamlweights
YOLOXND910x8-G416MS COCO 201724.10.9M1.1Gyamlweights
YOLOXTinyD910x8-G416MS COCO 201733.35.1M6.5Gyamlweights
YOLOXSD910x8-G640MS COCO 201740.79.0M26.8Gyamlweights
YOLOXMD910x8-G640MS COCO 201746.725.3M73.8Gyamlweights
YOLOXLD910x8-G640MS COCO 201749.254.2M155.6Gyamlweights
YOLOXXD910x8-G640MS COCO 201751.699.1M281.9Gyamlweights
YOLOXDarknet53D910x8-G640MS COCO 201747.763.7M185.3Gyamlweights
+


+

Deploy inference

+ +

Notes

+
    +
  • Context: Training context denoted as {device}x{pieces}-{MS mode}, where mindspore mode can be G - graph mode or F - pynative mode with ms function. For example, D910x8-G is for training on 8 pieces of Ascend 910 NPU using graph mode.
  • +
  • Box mAP: Accuracy reported on the validation set.
  • +
+ + + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/notes/changelog/index.html b/en/notes/changelog/index.html new file mode 100644 index 00000000..57dd9d8a --- /dev/null +++ b/en/notes/changelog/index.html @@ -0,0 +1,927 @@ + + + + + + + + + + + + + + + + + + + + + + + + Change Log - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Change Log

+

Coming soon.

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/notes/code_of_conduct/index.html b/en/notes/code_of_conduct/index.html new file mode 100644 index 00000000..add34448 --- /dev/null +++ b/en/notes/code_of_conduct/index.html @@ -0,0 +1,927 @@ + + + + + + + + + + + + + + + + + + + + + + + + Code of Conduct - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Code of Conduct

+

Coming soon.

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/notes/contributing/index.html b/en/notes/contributing/index.html new file mode 100644 index 00000000..8b60283a --- /dev/null +++ b/en/notes/contributing/index.html @@ -0,0 +1,1192 @@ + + + + + + + + + + + + + + + + + + + + + + + + Contributing - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + + +

MindYOLO contributing guidelines

+ + + + + +

Contributor License Agreement

+

It's required to sign CLA before your first code submission to MindYOLO community.

+

For individual contributor, please refer to ICLA online document for the detailed information.

+

Getting Started

+ +

Contribution Workflow

+

Code style

+

Please follow this style to make MindYOLO easy to review, maintain and develop.

+
    +
  • +

    Coding guidelines

    +

    The Python coding style suggested by Python PEP 8 Coding Style and C++ coding style suggested by Google C++ Coding Guidelines are used in MindYOLO community. The CppLint, CppCheck, CMakeLint, CodeSpell, Lizard, ShellCheck and PyLint are used to check the format of codes, installing these plugins in your IDE is recommended.

    +
  • +
  • +

    Unittest guidelines

    +

    The Python unittest style suggested by pytest and C++ unittest style suggested by Googletest Primer are used in MindYOLO community. The design intent of a testcase should be reflected by its name or comment.

    +
  • +
  • +

    Refactoring guidelines

    +

    We encourage developers to refactor our code to eliminate the code smell. All codes should conform to needs to the coding style and testing style, and refactoring codes are no exception. Lizard threshold for nloc (lines of code without comments) is 100 and for cnc (cyclomatic complexity number) is 20, when you receive a Lizard warning, you have to refactor the code you want to merge.

    +
  • +
  • +

    Document guidelines

    +

    We use MarkdownLint to check the format of markdown documents. MindYOLO CI modifies the following rules based on the default configuration. +- MD007 (unordered list indentation): The indent parameter is set to 4, indicating that all content in the unordered list needs to be indented using four spaces. +- MD009 (spaces at the line end): The br_spaces parameter is set to 2, indicating that there can be 0 or 2 spaces at the end of a line. +- MD029 (sequence numbers of an ordered list): The style parameter is set to ordered, indicating that the sequence numbers of the ordered list are in ascending order.

    +

    For details, please refer to RULES.

    +
  • +
+

Fork-Pull development model

+
    +
  • +

    Fork MindYOLO repository

    +

    Before submitting code to MindYOLO project, please make sure that this project have been forked to your own repository. It means that there will be parallel development between MindYOLO repository and your own repository, so be careful to avoid the inconsistency between them.

    +
  • +
  • +

    Clone the remote repository

    +

    If you want to download the code to the local machine, git is the best way:

    +
    # For GitHub
    +git clone https://github.com/{insert_your_forked_repo}/mindyolo.git
    +git remote add upstream https://github.com/mindspore-lab/mindyolo.git
    +
    +
  • +
  • +

    Develop code locally

    +

    To avoid inconsistency between multiple branches, checking out to a new branch is SUGGESTED:

    +
    git checkout -b {new_branch_name} origin/master
    +
    +

    Taking the master branch as an example, MindYOLO may create version branches and downstream development branches as needed, please fix bugs upstream first. +Then you can change the code arbitrarily.

    +
  • +
  • +

    Push the code to the remote repository

    +

    After updating the code, you should push the update in the formal way:

    +
    git add .
    +git status # Check the update status
    +git commit -m "Your commit title"
    +git commit -s --amend #Add the concrete description of your commit
    +git push origin {new_branch_name}
    +
    +
  • +
  • +

    Pull a request to MindYOLO repository

    +

    In the last step, you need to open a pull request comparing your new branch and the MindYOLO master branch. After finishing the pull request, the Jenkins CI will be automatically set up for building test. Your pull request should be merged into the upstream master branch as soon as possible to reduce the risk of merging.

    +
  • +
+

Report issues

+

A great way to contribute to the project is to send a detailed report when you encounter an issue. We always appreciate a well-written, thorough bug report, and will thank you for it!

+

When reporting issues, refer to this format:

+
    +
  • What version of env (MindSpore, os, python, MindYOLO etc) are you using?
  • +
  • Is this a BUG REPORT or FEATURE REQUEST?
  • +
  • What kind of issue is, add the labels to highlight it on the issue dashboard.
  • +
  • What happened?
  • +
  • What you expected to happen?
  • +
  • How to reproduce it?(as minimally and precisely as possible)
  • +
  • Special notes for your reviewers?
  • +
+

Issues advisory:

+
    +
  • If you find an unclosed issue, which is exactly what you are going to solve, please put some comments on that issue to tell others you would be in charge of it.
  • +
  • If an issue is opened for a while, it's recommended for contributors to precheck before working on solving that issue.
  • +
  • If you resolve an issue which is reported by yourself, it's also required to let others know before closing that issue.
  • +
  • If you want the issue to be responded as quickly as possible, please try to label it, you can find kinds of labels on Label List
  • +
+

Propose PRs

+
    +
  • Raise your idea as an issue on GitHub
  • +
  • If it is a new feature that needs lots of design details, a design proposal should also be submitted.
  • +
  • After reaching consensus in the issue discussions and design proposal reviews, complete the development on the forked repo and submit a PR.
  • +
  • No PR is permitted to be merged until it receives 2+ LGTM from approvers. Please NOTICE that an approver is NOT allowed to add LGTM on his own PR.
  • +
  • After PR is sufficiently discussed, it will get merged, abandoned or rejected depending on the outcome of the discussion.
  • +
+

PRs advisory:

+
    +
  • Any irrelevant changes should be avoided.
  • +
  • Make sure your commit history being ordered.
  • +
  • Always keep your branch up with the master branch.
  • +
  • For bug-fix PRs, make sure all related issues being linked.
  • +
+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/notes/faq/index.html b/en/notes/faq/index.html new file mode 100644 index 00000000..aa0c3458 --- /dev/null +++ b/en/notes/faq/index.html @@ -0,0 +1,910 @@ + + + + + + + + + + + + + + + + + + + + + + FAQ - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/reference/data/index.html b/en/reference/data/index.html new file mode 100644 index 00000000..a09237f4 --- /dev/null +++ b/en/reference/data/index.html @@ -0,0 +1,973 @@ + + + + + + + + + + + + + + + + + + + + + + + + data - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/reference/loss/index.html b/en/reference/loss/index.html new file mode 100644 index 00000000..fcecdac4 --- /dev/null +++ b/en/reference/loss/index.html @@ -0,0 +1,888 @@ + + + + + + + + + + + + + + + + + + + + Loss - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ +
+ + + + +
+ +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/reference/models/index.html b/en/reference/models/index.html new file mode 100644 index 00000000..0b35469c --- /dev/null +++ b/en/reference/models/index.html @@ -0,0 +1,1416 @@ + + + + + + + + + + + + + + + + + + + + + + + + models - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Models

+

Create Model

+ + + +
+ + + +

+mindyolo.models.model_factory.create_model(model_name, model_cfg=None, in_channels=3, num_classes=80, checkpoint_path='', **kwargs) + +

+ + +
+ +
+ Source code in mindyolo/models/model_factory.py +
15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
def create_model(
+    model_name: str,
+    model_cfg: dict = None,
+    in_channels: int = 3,
+    num_classes: int = 80,
+    checkpoint_path: str = "",
+    **kwargs,
+):
+    model_args = dict(cfg=model_cfg, num_classes=num_classes, in_channels=in_channels)
+    kwargs = {k: v for k, v in kwargs.items() if v is not None}
+
+    if not is_model(model_name):
+        raise RuntimeError(f"Unknown model {model_name}")
+
+    create_fn = model_entrypoint(model_name)
+    model = create_fn(**model_args, **kwargs)
+
+    if checkpoint_path:
+        assert os.path.isfile(checkpoint_path) and checkpoint_path.endswith(
+            ".ckpt"
+        ), f"[{checkpoint_path}] not a ckpt file."
+        checkpoint_param = load_checkpoint(checkpoint_path)
+        load_param_into_net(model, checkpoint_param)
+        logger.info(f"Load checkpoint from [{checkpoint_path}] success.")
+
+    return model
+
+
+
+ +

yolov3_head

+

yolov4_head

+

yolov5_head

+

yolov7_head

+

yolov8_head

+

yolox_head

+

initializer

+

focal_loss

+

iou_loss

+

label_assignment

+

loss_factory

+

yolov3_loss

+

yolov4_loss

+

yolov5_loss

+

yolov7_loss

+

yolov8_loss

+

yolox_loss

+

yolov3

+

yolov4

+

yolov5

+

yolov7

+

yolov8

+

yolox

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/tutorials/configuration/index.html b/en/tutorials/configuration/index.html new file mode 100644 index 00000000..1ec5cf87 --- /dev/null +++ b/en/tutorials/configuration/index.html @@ -0,0 +1,1145 @@ + + + + + + + + + + + + + + + + + + + + + + + + Configuration - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Configuration

+ +

配置

+

MindYOLO套件同时支持yaml文件参数和命令行参数解析,并将相对固定、与模型强相关、较为复杂或者含有嵌套结构的参数编写成yaml文件,需根据实际应用场景更改或者较为简单的参数则通过命令行传入。

+

下面以yolov3为例,解释如何配置相应的参数。

+

参数继承关系

+

参数优先级由高到低如下,出现同名参数时,低优先级参数会被高优先级参数覆盖

+
    +
  • 用户命令行传入参数
  • +
  • python执行py文件中parser的默认参数
  • +
  • 命令行传入config参数对应的yaml文件参数
  • +
  • 命令行传入config参数对应的yaml文件中__BASE__参数中包含的yaml文件参数,例如yolov3.yaml含有如下参数: +
    __BASE__: [
    +  '../coco.yaml',
    +  './hyp.scratch.yaml',
    +]
    +
  • +
+

基础参数

+

参数说明

+
    +
  • device_target: 所用设备,Ascend/GPU/CPU
  • +
  • save_dir: 运行结果保存路径,默认为./runs
  • +
  • log_interval: 打印日志step间隔,默认为100
  • +
  • is_parallel: 是否分布式训练,默认为False
  • +
  • ms_mode: 使用静态图模式(0)或动态图模式(1),默认为0。
  • +
  • config: yaml配置文件路径
  • +
  • per_batch_size: 每张卡batch size,默认为32
  • +
  • epochs: 训练epoch数,默认为300
  • +
  • ...
  • +
+

parse参数设置

+

该部分参数通常由命令行传入,示例如下:

+
mpirun --allow-run-as-root -n 8 python train.py --config ./configs/yolov7/yolov7.yaml  --is_parallel True --log_interval 50
+
+

数据集

+

参数说明

+
    +
  • dataset_name: 数据集名称
  • +
  • train_set: 训练集所在路径
  • +
  • val_set: 验证集所在路径
  • +
  • test_set: 测试集所在路径
  • +
  • nc: 数据集类别数
  • +
  • names: 类别名称
  • +
  • ...
  • +
+

yaml文件样例

+

该部分参数在configs/coco.yaml中定义,通常需修改其中的数据集路径

+

```yaml +data: + dataset_name: coco

+

train_set: ./coco/train2017.txt # 118287 images + val_set: ./coco/val2017.txt # 5000 images + test_set: ./coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

+

nc: 80

+

# class names + names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush' ] + ```

+

数据增强

+

参数说明

+
    +
  • num_parallel_workers: 读取数据的工作进程数
  • +
  • train_transformers: 训练过程数据增强
  • +
  • test_transformers: 验证过程数据增强
  • +
  • ...
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/hyp.scratch.yaml中定义,其中train_transformers和test_transformers均为由字典组成的列表,各字典包含数据增强操作名称、发生概率及该增强方法相关的参数

+

```yaml +data: + num_parallel_workers: 4

+

train_transforms: + - { func_name: mosaic, prob: 1.0, mosaic9_prob: 0.0, translate: 0.1, scale: 0.9 } + - { func_name: mixup, prob: 0.1, alpha: 8.0, beta: 8.0, needed_mosaic: True } + - { func_name: hsv_augment, prob: 1.0, hgain: 0.015, sgain: 0.7, vgain: 0.4 } + - { func_name: label_norm, xyxy2xywh_: True } + - { func_name: albumentations } + - { func_name: fliplr, prob: 0.5 } + - { func_name: label_pad, padding_size: 160, padding_value: -1 } + - { func_name: image_norm, scale: 255. } + - { func_name: image_transpose, bgr2rgb: True, hwc2chw: True }

+

test_transforms: + - { func_name: letterbox, scaleup: False } + - { func_name: label_norm, xyxy2xywh_: True } + - { func_name: label_pad, padding_size: 160, padding_value: -1 } + - { func_name: image_norm, scale: 255. } + - { func_name: image_transpose, bgr2rgb: True, hwc2chw: True } + ```

+

模型

+

参数说明

+
    +
  • model_name: 模型名称
  • +
  • depth_multiple: 模型深度因子
  • +
  • width_multiple: 模型宽度因子
  • +
  • stride: 特征图下采样倍数
  • +
  • anchors: 预设锚框
  • +
  • backbone: 模型骨干网络
  • +
  • head: 模型检测头
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/yolov3.yaml中定义,根据backbone和head参数进行网络构建,参数以嵌套列表的形式呈现,每行代表一层模块,包含4个参数,分别是 输入层编号(-1代表上一层)、模块重复次数、模块名称和模块相应参数。用户也可以不借助yaml文件而直接在py文件中定义和注册网络。 +```yaml +network: + model_name: yolov3

+

depth_multiple: 1.0 # model depth multiple + width_multiple: 1.0 # layer channel multiple + stride: [8, 16, 32] + anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32

+

# darknet53 backbone + backbone: + # [from, number, module, args] + [[-1, 1, ConvNormAct, [32, 3, 1]], # 0 + [-1, 1, ConvNormAct, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, ConvNormAct, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, ConvNormAct, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, ConvNormAct, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, ConvNormAct, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ]

+

# YOLOv3 head + head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, ConvNormAct, [512, 1, 1]], + [-1, 1, ConvNormAct, [1024, 3, 1]], + [-1, 1, ConvNormAct, [512, 1, 1]], + [-1, 1, ConvNormAct, [1024, 3, 1]], # 15 (P5/32-large)

+
 [-2, 1, ConvNormAct, [256, 1, 1]],
+ [-1, 1, Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, ConvNormAct, [256, 1, 1]],
+ [-1, 1, ConvNormAct, [512, 3, 1]],  # 22 (P4/16-medium)
+
+ [-2, 1, ConvNormAct, [128, 1, 1]],
+ [-1, 1, Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]],  # cat backbone P3
+ [-1, 1, Bottleneck, [256, False]],
+ [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
+
+ [[27, 22, 15], 1, YOLOv3Head, [nc, anchors, stride]],   # Detect(P3, P4, P5)
+]
+
+

```

+

损失函数

+

参数说明

+
    +
  • name: 损失函数名称
  • +
  • box: box损失权重
  • +
  • cls: class损失权重
  • +
  • cls_pw: class损失正样本权重
  • +
  • obj: object损失权重
  • +
  • obj_pw: object损失正样本权重
  • +
  • fl_gamma: focal loss gamma
  • +
  • anchor_t: anchor shape比例阈值
  • +
  • label_smoothing: 标签平滑值
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/hyp.scratch.yaml中定义

+

yaml +loss: + name: YOLOv7Loss + box: 0.05 # box loss gain + cls: 0.5 # cls loss gain + cls_pw: 1.0 # cls BCELoss positive_weight + obj: 1.0 # obj loss gain (scale with pixels) + obj_pw: 1.0 # obj BCELoss positive_weight + fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) + anchor_t: 4.0 # anchor-multiple threshold + label_smoothing: 0.0 # label smoothing epsilon

+

优化器

+

参数说明

+
    +
  • optimizer: 优化器名称。
  • +
  • lr_init: 学习率初始值
  • +
  • warmup_epochs: warmup epoch数
  • +
  • warmup_momentum: warmup momentum初始值
  • +
  • warmup_bias_lr: warmup bias学习率初始值
  • +
  • min_warmup_step: 最小warmup step数
  • +
  • group_param: 参数分组策略
  • +
  • gp_weight_decay: 分组参数权重衰减系数
  • +
  • start_factor: 初始学习率因数
  • +
  • end_factor: 结束学习率因数
  • +
  • momentum:移动平均的动量
  • +
  • loss_scale:loss缩放系数
  • +
  • nesterov:是否使用Nesterov Accelerated Gradient (NAG)算法更新梯度。
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/hyp.scratch.yaml中定义,如下示例中经过warmup阶段后的初始学习率为lr_init * start_factor = 0.01 * 1.0 = 0.01, 最终学习率为lr_init * end_factor = 0.01 * 0.01 = 0.0001

+

yaml +optimizer: + optimizer: momentum + lr_init: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) + momentum: 0.937 # SGD momentum/Adam beta1 + nesterov: True # update gradients with NAG(Nesterov Accelerated Gradient) algorithm + loss_scale: 1.0 # loss scale for optimizer + warmup_epochs: 3 # warmup epochs (fractions ok) + warmup_momentum: 0.8 # warmup initial momentum + warmup_bias_lr: 0.1 # warmup initial bias lr + min_warmup_step: 1000 # minimum warmup step + group_param: yolov7 # group param strategy + gp_weight_decay: 0.0005 # group param weight decay 5e-4 + start_factor: 1.0 + end_factor: 0.01

+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/tutorials/deployment/index.html b/en/tutorials/deployment/index.html new file mode 100644 index 00000000..040be28b --- /dev/null +++ b/en/tutorials/deployment/index.html @@ -0,0 +1,1277 @@ + + + + + + + + + + + + + + + + + + + + + + + + Deployment - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Deployment

+ +

MindYOLO部署

+

依赖

+
pip install -r requirement.txt
+
+

MindSpore Lite环境准备

+

参考:Lite环境配置
+ 注意:MindSpore Lite适配的python环境为3.7,请在安装Lite前准备好python3.7的环境
+ 1. 根据环境,下载配套的tar.gz包和whl包 + 2. 解压tar.gz包并安装对应版本的whl包 +

tar -zxvf mindspore_lite-2.0.0a0-cp37-cp37m-{os}_{platform}_64.tar.gz
+pip install mindspore_lite-2.0.0a0-cp37-cp37m-{os}_{platform}_64.whl
+
+ 3. 配置Lite的环境变量 + LITE_HOME为tar.gz解压出的文件夹路径,推荐使用绝对路径 +
export LITE_HOME=/path/to/mindspore-lite-{version}-{os}-{platform}
+export LD_LIBRARY_PATH=$LITE_HOME/runtime/lib:$LITE_HOME/tools/converter/lib:$LD_LIBRARY_PATH
+export PATH=$LITE_HOME/tools/converter/converter:$LITE_HOME/tools/benchmark:$PATH
+

+

快速开始

+

模型转换

+

ckpt模型转为mindir模型,此步骤可在CPU/Ascend910上运行 +

python ./deploy/export.py --config ./path_to_config/model.yaml --weight ./path_to_ckpt/weight.ckpt --per_batch_size 1 --file_format MINDIR --device_target [CPU/Ascend]
+e.g.
+# 在CPU上运行
+python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target CPU
+# 在Ascend上运行
+python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target Ascend
+

+

Lite Test

+
python deploy/test.py --model_type Lite --model_path ./path_to_mindir/weight.mindir --config ./path_to_config/yolo.yaml
+e.g.
+python deploy/test.py --model_type Lite --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml
+
+

Lite Predict

+
python ./deploy/predict.py --model_type Lite --model_path ./path_to_mindir/weight.mindir --config ./path_to_conifg/yolo.yaml --image_path ./path_to_image/image.jpg
+e.g.
+python deploy/predict.py --model_type Lite --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml --image_path ./coco/image/val2017/image.jpg
+
+

脚本说明

+
    +
  • predict.py 支持单张图片推理
  • +
  • test.py 支持COCO数据集推理
  • +
+

MindX部署

+

查看 MINDX

+

标准和支持的模型库

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameScaleContextImageSizeDatasetBox mAP (%)ParamsFLOPsRecipeDownload
YOLOv8ND310x1-G640MS COCO 201737.23.2M8.7Gyamlckpt
mindir
YOLOv8SD310x1-G640MS COCO 201744.611.2M28.6Gyamlckpt
mindir
YOLOv8MD310x1-G640MS COCO 201750.525.9M78.9Gyamlckpt
mindir
YOLOv8LD310x1-G640MS COCO 201752.843.7M165.2Gyamlckpt
mindir
YOLOv8XD310x1-G640MS COCO 201753.768.2M257.8Gyamlckpt
mindir
YOLOv7TinyD310x1-G640MS COCO 201737.56.2M13.8Gyamlckpt
mindir
YOLOv7LD310x1-G640MS COCO 201750.836.9M104.7Gyamlckpt
mindir
YOLOv7XD310x1-G640MS COCO 201752.471.3M189.9Gyamlckpt
mindir
YOLOv5ND310x1-G640MS COCO 201727.31.9M4.5Gyamlckpt
mindir
YOLOv5SD310x1-G640MS COCO 201737.67.2M16.5Gyamlckpt
mindir
YOLOv5MD310x1-G640MS COCO 201744.921.2M49.0Gyamlckpt
mindir
YOLOv5LD310x1-G640MS COCO 201748.546.5M109.1Gyamlckpt
mindir
YOLOv5XD310x1-G640MS COCO 201750.586.7M205.7Gyamlckpt
mindir
YOLOv4CSPDarknet53D310x1-G608MS COCO 201745.427.6M52Gyamlckpt
mindir
YOLOv4CSPDarknet53(silu)D310x1-G640MS COCO 201745.827.6M52Gyamlckpt
mindir
YOLOv3Darknet53D310x1-G640MS COCO 201745.561.9M156.4Gyamlckpt
mindir
YOLOXND310x1-G416MS COCO 201724.10.9M1.1Gyamlckpt
mindir
YOLOXTinyD310x1-G416MS COCO 201733.35.1M6.5Gyamlckpt
mindir
YOLOXSD310x1-G640MS COCO 201740.79.0M26.8Gyamlckpt
mindir
YOLOXMD310x1-G640MS COCO 201746.725.3M73.8Gyamlckpt
mindir
YOLOXLD310x1-G640MS COCO 201749.254.2M155.6Gyamlckpt
mindir
YOLOXXD310x1-G640MS COCO 201751.699.1M281.9Gyamlckpt
mindir
YOLOXDarknet53D310x1-G640MS COCO 201747.763.7M185.3Gyamlckpt
mindir
+


+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/tutorials/finetune/index.html b/en/tutorials/finetune/index.html new file mode 100644 index 00000000..b6f4f7fa --- /dev/null +++ b/en/tutorials/finetune/index.html @@ -0,0 +1,1080 @@ + + + + + + + + + + + + + + + + + + + + + + + + Finetune - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Finetune

+ +

自定义数据集finetune流程

+

本文以安全帽佩戴检测数据集(SHWD)为例,介绍自定义数据集在MindYOLO上进行finetune的主要流程。

+

数据集格式转换

+

SHWD数据集采用voc格式的数据标注,其文件目录如下所示: +

             ROOT_DIR
+                ├── Annotations
+                │        ├── 000000.xml
+                │        └── 000002.xml
+                ├── ImageSets
+                │       └── Main
+                │             ├── test.txt
+                │             ├── train.txt
+                │             ├── trainval.txt
+                │             └── val.txt
+                └── JPEGImages
+                        ├── 000000.jpg
+                        └── 000002.jpg
+
+其中,ImageSets/Main文件下的txt文件中每行代表相应子集中单张图片不含后缀的文件名,例如: +
000002
+000005
+000019
+000022
+000027
+000034
+

+

由于MindYOLO在验证阶段选用图片名称作为image_id,因此图片名称只能为数值类型,而不能为字符串类型,还需要对图片进行改名。对SHWD数据集格式的转换包含如下步骤: +* 将图片复制到相应的路径下并改名 +* 在根目录下相应的txt文件中写入该图片的相对路径 +* 解析xml文件,在相应路径下生成对应的txt标注文件 +* 验证集还需生成最终的json文件

+

详细实现可参考convert_shwd2yolo.py。运行方式如下:

+
python examples/finetune_SHWD/convert_shwd2yolo.py --root_dir /path_to_shwd/SHWD
+
+

运行以上命令将在不改变原数据集的前提下,在同级目录生成yolo格式的SHWD数据集。

+

预训练模型文件转换

+

由于SHWD数据集只有7000+张图片,选择yolov7-tiny进行该数据集的训练,可下载MindYOLO提供的在coco数据集上训练好的模型文件作为预训练模型。由于coco数据集含有80种物体类别,SHWD数据集只有两类,模型的最后一层head层输出与类别数nc有关,因此需将预训练模型文件的最后一层去掉, 可参考convert_yolov7-tiny_pretrain_ckpt.py。运行方式如下:

+
python examples/finetune_SHWD/convert_yolov7-tiny_pretrain_ckpt.py
+
+

模型微调(Finetune)

+

简要的训练流程可参考finetune_shwd.py

+
    +
  • 在多卡NPU/GPU上进行分布式模型训练,以8卡为例:
  • +
+
mpirun --allow-run-as-root -n 8 python examples/finetune_SHWD/finetune_shwd.py --config ./examples/finetune_SHWD/yolov7-tiny_shwd.yaml --is_parallel True
+
+
    +
  • 在单卡NPU/GPU/CPU上训练模型:
  • +
+
python examples/finetune_SHWD/finetune_shwd.py --config ./examples/finetune_SHWD/yolov7-tiny_shwd.yaml 
+
+

注意:直接用yolov7-tiny默认coco参数在SHWD数据集上训练,可取得AP50 87.0的精度。将lr_init参数由0.01改为0.001,即可实现ap50为89.2的精度结果。

+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/tutorials/modelarts/index.html b/en/tutorials/modelarts/index.html new file mode 100644 index 00000000..2a0fa613 --- /dev/null +++ b/en/tutorials/modelarts/index.html @@ -0,0 +1,926 @@ + + + + + + + + + + + + + + + + + + + + + + + + CloudBrain - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/en/tutorials/quick_start/index.html b/en/tutorials/quick_start/index.html new file mode 100644 index 00000000..49ccf4ff --- /dev/null +++ b/en/tutorials/quick_start/index.html @@ -0,0 +1,1108 @@ + + + + + + + + + + + + + + + + + + + + + + + + Quick Start - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Quick Start

+ +

Getting Started with MindYOLO

+

This document provides a brief introduction to the usage of built-in command-line tools in MindYOLO.

+

Inference Demo with Pre-trained Models

+
    +
  1. Pick a model and its config file from the + model zoo, + such as, ./configs/yolov7/yolov7.yaml.
  2. +
  3. Download the corresponding pre-trained checkpoint from the model zoo of each model.
  4. +
  5. To run YOLO object detection with the built-in configs, please run:
  6. +
+
# Run with Ascend (By default)
+python demo/predict.py --config ./configs/yolov7/yolov7.yaml --weight=/path_to_ckpt/WEIGHT.ckpt --image_path /path_to_image/IMAGE.jpg
+
+# Run with GPU
+python demo/predict.py --config ./configs/yolov7/yolov7.yaml --weight=/path_to_ckpt/WEIGHT.ckpt --image_path /path_to_image/IMAGE.jpg --device_target=GPU
+
+

For details of the command line arguments, see demo/predict.py -h or look at its source code +to understand their behavior. Some common arguments are: +* To run on cpu, modify device_target to CPU. +* The results will be saved in ./detect_results

+

Training & Evaluation in Command Line

+
    +
  • Prepare your dataset in YOLO format. If trained with COCO (YOLO format), prepare it from yolov5 or the darknet.
  • +
+
+ +
  coco/
+    {train,val}2017.txt
+    annotations/
+      instances_{train,val}2017.json
+    images/
+      {train,val}2017/
+          00000001.jpg
+          ...
+          # image files that are mentioned in the corresponding train/val2017.txt
+    labels/
+      {train,val}2017/
+          00000001.txt
+          ...
+          # label files that are mentioned in the corresponding train/val2017.txt
+
+
+ +
    +
  • +

    To train a model on 8 NPUs/GPUs: +

    mpirun --allow-run-as-root -n 8 python train.py --config ./configs/yolov7/yolov7.yaml  --is_parallel True
    +

    +
  • +
  • +

    To train a model on 1 NPU/GPU/CPU: +

    python train.py --config ./configs/yolov7/yolov7.yaml 
    +

    +
  • +
  • +

    To evaluate a model's performance: +

    python test.py --config ./configs/yolov7/yolov7.yaml --weight /path_to_ckpt/WEIGHT.ckpt
    +
    +Notes: (1) The default hyper-parameter is used for 8-card training, and some parameters need to be adjusted in the case of a single card. (2) The default device is Ascend, and you can modify it by specifying 'device_target' as Ascend/GPU/CPU, as these are currently supported.

    +
  • +
  • For more options, see train/test.py -h.
  • +
+

Deployment

+

See here.

+

To use MindYOLO APIs in Your Code

+

To be supplemented.

+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/gen_ref_pages.py b/gen_ref_pages.py new file mode 100644 index 00000000..d177b4d2 --- /dev/null +++ b/gen_ref_pages.py @@ -0,0 +1,59 @@ +"""Generate the code reference pages of models.""" +import os +import sys + +sys.path.append(".") + +import importlib +import logging +from pathlib import Path + +_logger = logging.getLogger('mkdocs') +_langs = ["en", "zh"] + + +def _gen_page(lang): + full_doc_path = Path(f"docs/{lang}/reference/models.md") + _logger.info(f"Generating reference page: {full_doc_path}") + with open(full_doc_path, "w") as fd: + print("# Models", file=fd) + print("\n\n## Create Model", file=fd) + print("\n### ::: mindyolo.models.model_factory.create_model", file=fd) + + for path in sorted(Path("mindyolo/models").rglob("*.py")): + module_path = path.with_suffix("") # eg: mindyolo/models/resnet + parts = list(module_path.parts) # eg: ["mindyolo", "models", "resnet"] + if parts[-1].startswith("__") or parts[-2] == "layers": + continue + # fileter out utility modules + if parts[-1] in ["model_factory", "registry", "utils", "helpers"]: + continue + # filter out the net module which is replaced by the net function with the same name + # TODO: we need to change mechanism of model importing + if parts[-1] in ["googlenet", "inception_v3", "inception_v4", "xception", "pnasnet"]: + continue + + try: + print(f"\n\n## {parts[-1]}", file=fd) + identifier = ".".join(parts) # eg: mindyolo.models.resnet + mod = importlib.import_module(identifier) + for mem in sorted(set(mod.__all__)): + print(f"\n### ::: {identifier}.{mem}", file=fd) + except Exception as err: + _logger.warning(f"Cannot generate reference of {identifier}, error: {err}.") + + +def _del_page(lang): + full_doc_path = Path(f"docs/{lang}/reference/models.md") + _logger.info(f"Cleaning generated reference page: {full_doc_path}") + os.remove(full_doc_path) + + +def on_startup(command, dirty): + for lang in _langs: + _gen_page(lang) + + +def 
on_shutdown(): + for lang in _langs: + _del_page(lang) diff --git a/how_to_guides/write_a_new_model/index.html b/how_to_guides/write_a_new_model/index.html new file mode 100644 index 00000000..444e199b --- /dev/null +++ b/how_to_guides/write_a_new_model/index.html @@ -0,0 +1,927 @@ + + + + + + + + + + + + + + + + + + + + + + + + Write A New Model - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Write A New Model

+

comming soon.

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 00000000..8ecc488d --- /dev/null +++ b/index.html @@ -0,0 +1,1163 @@ + + + + + + + + + + + + + + + + + + + + + + Home - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + +

MindYOLO

+

+ + docs + + + GitHub + + + PRs Welcome + +

+ +

MindYOLO is MindSpore Lab's software toolbox that implements state-of-the-art YOLO series algorithms, support list and benchmark. It is written in Python and powered by the MindSpore AI framework.

+

The master branch supporting MindSpore 2.0.

+

+

What is New

+
    +
  • 2023/06/15
  • +
+
    +
  1. Support YOLOv3/v4/v5/X/v7/v8 6 models and release 23 corresponding weights, see MODEL ZOO for details.
  2. +
  3. Support MindSpore 2.0.
  4. +
  5. Support deployment on MindSpore lite 2.0.
  6. +
  7. New online documents are available!
  8. +
+

Benchmark and Model Zoo

+

See MODEL ZOO.

+
+Supported Algorithms + +
+

Installation

+

See INSTALLATION for details.

+

Getting Started

+

See GETTING STARTED for details.

+

Learn More about MindYOLO

+

To be supplemented.

+

Notes

+

⚠️ The current version is based on the static shape of GRAPH. The dynamic shape of the PYNATIVE will be supported later. Please look forward to it.

+

How to Contribute

+

We appreciate all contributions including issues and PRs to make MindYOLO better.

+

Please refer to CONTRIBUTING.md for the contributing guideline.

+

License

+

MindYOLO is released under the Apache License 2.0.

+

Acknowledgement

+

MindYOLO is an open source project that welcome any contribution and feedback. We wish that the toolbox and benchmark could serve the growing research community by providing a flexible as well as standardized toolkit to reimplement existing methods and develop their own new realtime object detection methods.

+

Citation

+

If you find this project useful in your research, please consider cite:

+
@misc{MindSpore Object Detection YOLO 2023,
+    title={{MindSpore Object Detection YOLO}:MindSpore Object Detection YOLO Toolbox and Benchmark},
+    author={MindSpore YOLO Contributors},
+    howpublished = {\url{https://github.com/mindspore-lab/mindyolo}},
+    year={2023}
+}
+
+ + + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/installation/index.html b/installation/index.html new file mode 100644 index 00000000..77e5e112 --- /dev/null +++ b/installation/index.html @@ -0,0 +1,960 @@ + + + + + + + + + + + + + + + + + + + + Installation - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Installation

+

Dependency

+
    +
  • mindspore >= 2.0
  • +
  • numpy >= 1.17.0
  • +
  • pyyaml >= 5.3
  • +
  • openmpi 4.0.3 (for distributed mode)
  • +
+

To install the dependency, please run

+
pip install -r requirements.txt
+
+

MindSpore can be easily installed by following the official instructions where you can select your hardware platform for the best fit. To run in distributed mode, openmpi is required to install.

+

⚠️ The current version only supports the Ascend platform, and the GPU platform will be supported later.

+

Install with PyPI

+

MindYOLO is published as a Python package and can be installed with pip, ideally by using a virtual environment. Open up a terminal and install MindYOLO with:

+
pip install mindyolo
+
+

Install from Source (Bleeding Edge Version)

+

from VCS

+
pip install git+https://github.com/mindspore-lab/mindyolo.git
+
+

from local src

+

As this project is in active development, if you are a developer or contributor, please prefer this installation!

+

MindYOLO can be directly used from GitHub by cloning the repository into a local folder which might be useful if you want to use the very latest version:

+
git clone https://github.com/mindspore-lab/mindyolo.git
+
+

After cloning from git, it is recommended that you install using "editable" mode, which can help resolve potential module import issues:

+
cd mindyolo
+pip install -e .
+
+

In addition, we provide an optional fast coco api to improve eval speed. The code is provided in C++, and you can try compiling with the following commands (This operation is optional) :

+
cd mindyolo/csrc
+sh build.sh
+
+ + + + + + + + +
+
+ + + + +
+ + + +
+ +
+ + + + +
+ +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/modelzoo/index.html b/modelzoo/index.html new file mode 100644 index 00000000..ab2daa7a --- /dev/null +++ b/modelzoo/index.html @@ -0,0 +1,1239 @@ + + + + + + + + + + + + + + + + + + + + + + + + Benchmark - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Model Zoo

+ +

MindYOLO Model Zoo and Baselines

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameScaleContextImageSizeDatasetBox mAP (%)ParamsFLOPsRecipeDownload
YOLOv8ND910x8-G640MS COCO 201737.23.2M8.7Gyamlweights
YOLOv8SD910x8-G640MS COCO 201744.611.2M28.6Gyamlweights
YOLOv8MD910x8-G640MS COCO 201750.525.9M78.9Gyamlweights
YOLOv8LD910x8-G640MS COCO 201752.843.7M165.2Gyamlweights
YOLOv8XD910x8-G640MS COCO 201753.768.2M257.8Gyamlweights
YOLOv7TinyD910x8-G640MS COCO 201737.56.2M13.8Gyamlweights
YOLOv7LD910x8-G640MS COCO 201750.836.9M104.7Gyamlweights
YOLOv7XD910x8-G640MS COCO 201752.471.3M189.9Gyamlweights
YOLOv5ND910x8-G640MS COCO 201727.31.9M4.5Gyamlweights
YOLOv5SD910x8-G640MS COCO 201737.67.2M16.5Gyamlweights
YOLOv5MD910x8-G640MS COCO 201744.921.2M49.0Gyamlweights
YOLOv5LD910x8-G640MS COCO 201748.546.5M109.1Gyamlweights
YOLOv5XD910x8-G640MS COCO 201750.586.7M205.7Gyamlweights
YOLOv4CSPDarknet53D910x8-G608MS COCO 201745.427.6M52Gyamlweights
YOLOv4CSPDarknet53(silu)D910x8-G608MS COCO 201745.827.6M52Gyamlweights
YOLOv3Darknet53D910x8-G640MS COCO 201745.561.9M156.4Gyamlweights
YOLOXND910x8-G416MS COCO 201724.10.9M1.1Gyamlweights
YOLOXTinyD910x8-G416MS COCO 201733.35.1M6.5Gyamlweights
YOLOXSD910x8-G640MS COCO 201740.79.0M26.8Gyamlweights
YOLOXMD910x8-G640MS COCO 201746.725.3M73.8Gyamlweights
YOLOXLD910x8-G640MS COCO 201749.254.2M155.6Gyamlweights
YOLOXXD910x8-G640MS COCO 201751.699.1M281.9Gyamlweights
YOLOXDarknet53D910x8-G640MS COCO 201747.763.7M185.3Gyamlweights
+


+

Deploy inference

+ +

Notes

+
    +
  • Context: Training context denoted as {device}x{pieces}-{MS mode}, where mindspore mode can be G - graph mode or F - pynative mode with ms function. For example, D910x8-G is for training on 8 pieces of Ascend 910 NPU using graph mode.
  • +
  • Box mAP: Accuracy reported on the validation set.
  • +
+ + + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/notes/changelog/index.html b/notes/changelog/index.html new file mode 100644 index 00000000..fd538d37 --- /dev/null +++ b/notes/changelog/index.html @@ -0,0 +1,927 @@ + + + + + + + + + + + + + + + + + + + + + + + + Change Log - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Change Log

+

Coming soon.

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/notes/code_of_conduct/index.html b/notes/code_of_conduct/index.html new file mode 100644 index 00000000..ab483ebf --- /dev/null +++ b/notes/code_of_conduct/index.html @@ -0,0 +1,927 @@ + + + + + + + + + + + + + + + + + + + + + + + + Code of Conduct - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Code of Conduct

+

Coming soon.

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/notes/contributing/index.html b/notes/contributing/index.html new file mode 100644 index 00000000..da6d6461 --- /dev/null +++ b/notes/contributing/index.html @@ -0,0 +1,1192 @@ + + + + + + + + + + + + + + + + + + + + + + + + Contributing - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + + +

MindYOLO contributing guidelines

+ + + + + +

Contributor License Agreement

+

You are required to sign the CLA before your first code submission to the MindYOLO community.

+

For individual contributor, please refer to ICLA online document for the detailed information.

+

Getting Started

+ +

Contribution Workflow

+

Code style

+

Please follow this style to make MindYOLO easy to review, maintain and develop.

+
    +
  • +

    Coding guidelines

    +

    The Python coding style suggested by Python PEP 8 Coding Style and C++ coding style suggested by Google C++ Coding Guidelines are used in MindYOLO community. The CppLint, CppCheck, CMakeLint, CodeSpell, Lizard, ShellCheck and PyLint are used to check the format of codes, installing these plugins in your IDE is recommended.

    +
  • +
  • +

    Unittest guidelines

    +

    The Python unittest style suggested by pytest and C++ unittest style suggested by Googletest Primer are used in MindYOLO community. The design intent of a testcase should be reflected by its name or comment.

    +
  • +
  • +

    Refactoring guidelines

    +

    We encourage developers to refactor our code to eliminate the code smell. All codes should conform to needs to the coding style and testing style, and refactoring codes are no exception. Lizard threshold for nloc (lines of code without comments) is 100 and for cnc (cyclomatic complexity number) is 20, when you receive a Lizard warning, you have to refactor the code you want to merge.

    +
  • +
  • +

    Document guidelines

    +

    We use MarkdownLint to check the format of markdown documents. MindYOLO CI modifies the following rules based on the default configuration. +- MD007 (unordered list indentation): The indent parameter is set to 4, indicating that all content in the unordered list needs to be indented using four spaces. +- MD009 (spaces at the line end): The br_spaces parameter is set to 2, indicating that there can be 0 or 2 spaces at the end of a line. +- MD029 (sequence numbers of an ordered list): The style parameter is set to ordered, indicating that the sequence numbers of the ordered list are in ascending order.

    +

    For details, please refer to RULES.

    +
  • +
+

Fork-Pull development model

+
    +
  • +

    Fork MindYOLO repository

    +

    Before submitting code to MindYOLO project, please make sure that this project have been forked to your own repository. It means that there will be parallel development between MindYOLO repository and your own repository, so be careful to avoid the inconsistency between them.

    +
  • +
  • +

    Clone the remote repository

    +

    If you want to download the code to the local machine, git is the best way:

    +
    # For GitHub
    +git clone https://github.com/{insert_your_forked_repo}/mindyolo.git
    +git remote add upstream https://github.com/mindspore-lab/mindyolo.git
    +
    +
  • +
  • +

    Develop code locally

    +

    To avoid inconsistency between multiple branches, checking out to a new branch is SUGGESTED:

    +
    git checkout -b {new_branch_name} origin/master
    +
    +

    Taking the master branch as an example, MindYOLO may create version branches and downstream development branches as needed, please fix bugs upstream first. +Then you can change the code arbitrarily.

    +
  • +
  • +

    Push the code to the remote repository

    +

    After updating the code, you should push the update in the formal way:

    +
    git add .
    +git status # Check the update status
    +git commit -m "Your commit title"
    +git commit -s --amend #Add the concrete description of your commit
    +git push origin {new_branch_name}
    +
    +
  • +
  • +

    Pull a request to MindYOLO repository

    +

    In the last step, you need to open a pull request comparing your new branch and the MindYOLO master branch. After finishing the pull request, the Jenkins CI will be automatically set up for build testing. Your pull request should be merged into the upstream master branch as soon as possible to reduce the risk of merging.

    +
  • +
+

Report issues

+

A great way to contribute to the project is to send a detailed report when you encounter an issue. We always appreciate a well-written, thorough bug report, and will thank you for it!

+

When reporting issues, refer to this format:

+
    +
  • What version of env (MindSpore, os, python, MindYOLO etc) are you using?
  • +
  • Is this a BUG REPORT or FEATURE REQUEST?
  • +
  • What kind of issue is, add the labels to highlight it on the issue dashboard.
  • +
  • What happened?
  • +
  • What you expected to happen?
  • +
  • How to reproduce it?(as minimally and precisely as possible)
  • +
  • Special notes for your reviewers?
  • +
+

Issues advisory:

+
    +
  • If you find an unclosed issue, which is exactly what you are going to solve, please put some comments on that issue to tell others you would be in charge of it.
  • +
  • If an issue is opened for a while, it's recommended for contributors to precheck before working on solving that issue.
  • +
  • If you resolve an issue which is reported by yourself, it's also required to let others know before closing that issue.
  • +
  • If you want the issue to be responded as quickly as possible, please try to label it, you can find kinds of labels on Label List
  • +
+

Propose PRs

+
    +
  • Raise your idea as an issue on GitHub
  • +
  • If it is a new feature that needs lots of design details, a design proposal should also be submitted.
  • +
  • After reaching consensus in the issue discussions and design proposal reviews, complete the development on the forked repo and submit a PR.
  • +
  • No PR may be merged until it receives 2+ LGTM from approvers. Please NOTICE that an approver is NOT allowed to add LGTM on their own PR.
  • +
  • After PR is sufficiently discussed, it will get merged, abandoned or rejected depending on the outcome of the discussion.
  • +
+

PRs advisory:

+
    +
  • Any irrelevant changes should be avoided.
  • +
  • Make sure your commit history being ordered.
  • +
  • Always keep your branch up with the master branch.
  • +
  • For bug-fix PRs, make sure all related issues being linked.
  • +
+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/notes/faq/index.html b/notes/faq/index.html new file mode 100644 index 00000000..16b0e9f7 --- /dev/null +++ b/notes/faq/index.html @@ -0,0 +1,910 @@ + + + + + + + + + + + + + + + + + + + + + + FAQ - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/objects.inv b/objects.inv new file mode 100644 index 0000000000000000000000000000000000000000..a02009efab59df18edb200edd4f2f4bafed530ee GIT binary patch literal 201 zcmY#Z2rkIT%&Sny%qvUHE6FdaR47X=D$dN$Q!wIERtPA{&q_@$u~P8O%u9*%_wiS7 z$xki@N`yfaSt%Ik0U=NzBqLR!C^a_`s4lfgAwNx_BqOs}AuTf}RRKsR=jRp_r4|>b zrYMvaXXd3VROMtQ>2X!uI(_zx_sTQpPhLNL*5C82W?R6=%+8alKV7F*opfEaxSvity9Ia7TA literal 0 HcmV?d00001 diff --git a/reference/data/index.html b/reference/data/index.html new file mode 100644 index 00000000..f531183d --- /dev/null +++ b/reference/data/index.html @@ -0,0 +1,973 @@ + + + + + + + + + + + + + + + + + + + + + + + + data - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/loss/index.html b/reference/loss/index.html new file mode 100644 index 00000000..9e6e64e4 --- /dev/null +++ b/reference/loss/index.html @@ -0,0 +1,888 @@ + + + + + + + + + + + + + + + + + + + + Loss - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ +
+ + + + +
+ +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/models/index.html b/reference/models/index.html new file mode 100644 index 00000000..6e422d7c --- /dev/null +++ b/reference/models/index.html @@ -0,0 +1,1416 @@ + + + + + + + + + + + + + + + + + + + + + + + + models - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Models

+

Create Model

+ + + +
+ + + +

+mindyolo.models.model_factory.create_model(model_name, model_cfg=None, in_channels=3, num_classes=80, checkpoint_path='', **kwargs) + +

+ + +
+ +
+ Source code in mindyolo/models/model_factory.py +
15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
def create_model(
+    model_name: str,
+    model_cfg: dict = None,
+    in_channels: int = 3,
+    num_classes: int = 80,
+    checkpoint_path: str = "",
+    **kwargs,
+):
+    model_args = dict(cfg=model_cfg, num_classes=num_classes, in_channels=in_channels)
+    kwargs = {k: v for k, v in kwargs.items() if v is not None}
+
+    if not is_model(model_name):
+        raise RuntimeError(f"Unknown model {model_name}")
+
+    create_fn = model_entrypoint(model_name)
+    model = create_fn(**model_args, **kwargs)
+
+    if checkpoint_path:
+        assert os.path.isfile(checkpoint_path) and checkpoint_path.endswith(
+            ".ckpt"
+        ), f"[{checkpoint_path}] not a ckpt file."
+        checkpoint_param = load_checkpoint(checkpoint_path)
+        load_param_into_net(model, checkpoint_param)
+        logger.info(f"Load checkpoint from [{checkpoint_path}] success.")
+
+    return model
+
+
+
+ +

yolov3_head

+

yolov4_head

+

yolov5_head

+

yolov7_head

+

yolov8_head

+

yolox_head

+

initializer

+

focal_loss

+

iou_loss

+

label_assignment

+

loss_factory

+

yolov3_loss

+

yolov4_loss

+

yolov5_loss

+

yolov7_loss

+

yolov8_loss

+

yolox_loss

+

yolov3

+

yolov4

+

yolov5

+

yolov7

+

yolov8

+

yolox

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 00000000..83cbd78b --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":""},{"location":"#mindyolo","title":"MindYOLO","text":"

MindYOLO is MindSpore Lab's software toolbox that implements state-of-the-art YOLO series algorithms, support list and benchmark. It is written in Python and powered by the MindSpore AI framework.

The master branch supporting MindSpore 2.0.

"},{"location":"#what-is-new","title":"What is New","text":"
  • 2023/06/15
  1. Support YOLOv3/v4/v5/X/v7/v8 6 models and release 23 corresponding weights, see MODEL ZOO for details.
  2. Support MindSpore 2.0.
  3. Support deployment on MindSpore lite 2.0.
  4. New online documents are available!
"},{"location":"#benchmark-and-model-zoo","title":"Benchmark and Model Zoo","text":"

See MODEL ZOO.

Supported Algorithms
  • YOLOv8
  • YOLOv7
  • YOLOX
  • YOLOv5
  • YOLOv4
  • YOLOv3
"},{"location":"#installation","title":"Installation","text":"

See INSTALLATION for details.

"},{"location":"#getting-started","title":"Getting Started","text":"

See GETTING STARTED for details.

"},{"location":"#learn-more-about-mindyolo","title":"Learn More about MindYOLO","text":"

To be supplemented.

"},{"location":"#notes","title":"Notes","text":"

\u26a0\ufe0f The current version is based on the static shape of GRAPH. The dynamic shape of the PYNATIVE will be supported later. Please look forward to it.

"},{"location":"#how-to-contribute","title":"How to Contribute","text":"

We appreciate all contributions including issues and PRs to make MindYOLO better.

Please refer to CONTRIBUTING.md for the contributing guideline.

"},{"location":"#license","title":"License","text":"

MindYOLO is released under the Apache License 2.0.

"},{"location":"#acknowledgement","title":"Acknowledgement","text":"

MindYOLO is an open source project that welcomes any contribution and feedback. We wish that the toolbox and benchmark could serve the growing research community by providing a flexible as well as standardized toolkit to reimplement existing methods and develop their own new realtime object detection methods.

"},{"location":"#citation","title":"Citation","text":"

If you find this project useful in your research, please consider citing it:

@misc{MindSpore Object Detection YOLO 2023,\n    title={{MindSpore Object Detection YOLO}:MindSpore Object Detection YOLO Toolbox and Benchmark},\n    author={MindSpore YOLO Contributors},\n    howpublished = {\\url{https://github.com/mindspore-lab/mindyolo}},\n    year={2023}\n}\n
"},{"location":"installation/","title":"Installation","text":""},{"location":"installation/#dependency","title":"Dependency","text":"
  • mindspore >= 2.0
  • numpy >= 1.17.0
  • pyyaml >= 5.3
  • openmpi 4.0.3 (for distributed mode)

To install the dependency, please run

pip install -r requirements.txt\n

MindSpore can be easily installed by following the official instructions where you can select your hardware platform for the best fit. To run in distributed mode, openmpi must be installed.

\u26a0\ufe0f The current version only supports the Ascend platform, and the GPU platform will be supported later.

"},{"location":"installation/#install-with-pypi","title":"Install with PyPI","text":"

MindYOLO is published as a Python package and can be installed with pip, ideally by using a virtual environment. Open up a terminal and install MindYOLO with:

pip install mindyolo\n
"},{"location":"installation/#install-from-source-bleeding-edge-version","title":"Install from Source (Bleeding Edge Version)","text":""},{"location":"installation/#from-vcs","title":"from VCS","text":"
pip install git+https://github.com/mindspore-lab/mindyolo.git\n
"},{"location":"installation/#from-local-src","title":"from local src","text":"

As this project is in active development, if you are a developer or contributor, please prefer this installation!

MindYOLO can be directly used from GitHub by cloning the repository into a local folder which might be useful if you want to use the very latest version:

git clone https://github.com/mindspore-lab/mindyolo.git\n

After cloning from git, it is recommended that you install using \"editable\" mode, which can help resolve potential module import issues:

cd mindyolo\npip install -e .\n

In addition, we provide an optional fast coco api to improve eval speed. The code is provided in C++, and you can try compiling with the following commands (This operation is optional) :

cd mindyolo/csrc\nsh build.sh\n
"},{"location":"modelzoo/","title":"Model Zoo","text":""},{"location":"modelzoo/#mindyolo-model-zoo-and-baselines","title":"MindYOLO Model Zoo and Baselines","text":"Name Scale Context ImageSize Dataset Box mAP (%) Params FLOPs Recipe Download YOLOv8 N D910x8-G 640 MS COCO 2017 37.2 3.2M 8.7G yaml weights YOLOv8 S D910x8-G 640 MS COCO 2017 44.6 11.2M 28.6G yaml weights YOLOv8 M D910x8-G 640 MS COCO 2017 50.5 25.9M 78.9G yaml weights YOLOv8 L D910x8-G 640 MS COCO 2017 52.8 43.7M 165.2G yaml weights YOLOv8 X D910x8-G 640 MS COCO 2017 53.7 68.2M 257.8G yaml weights YOLOv7 Tiny D910x8-G 640 MS COCO 2017 37.5 6.2M 13.8G yaml weights YOLOv7 L D910x8-G 640 MS COCO 2017 50.8 36.9M 104.7G yaml weights YOLOv7 X D910x8-G 640 MS COCO 2017 52.4 71.3M 189.9G yaml weights YOLOv5 N D910x8-G 640 MS COCO 2017 27.3 1.9M 4.5G yaml weights YOLOv5 S D910x8-G 640 MS COCO 2017 37.6 7.2M 16.5G yaml weights YOLOv5 M D910x8-G 640 MS COCO 2017 44.9 21.2M 49.0G yaml weights YOLOv5 L D910x8-G 640 MS COCO 2017 48.5 46.5M 109.1G yaml weights YOLOv5 X D910x8-G 640 MS COCO 2017 50.5 86.7M 205.7G yaml weights YOLOv4 CSPDarknet53 D910x8-G 608 MS COCO 2017 45.4 27.6M 52G yaml weights YOLOv4 CSPDarknet53(silu) D910x8-G 608 MS COCO 2017 45.8 27.6M 52G yaml weights YOLOv3 Darknet53 D910x8-G 640 MS COCO 2017 45.5 61.9M 156.4G yaml weights YOLOX N D910x8-G 416 MS COCO 2017 24.1 0.9M 1.1G yaml weights YOLOX Tiny D910x8-G 416 MS COCO 2017 33.3 5.1M 6.5G yaml weights YOLOX S D910x8-G 640 MS COCO 2017 40.7 9.0M 26.8G yaml weights YOLOX M D910x8-G 640 MS COCO 2017 46.7 25.3M 73.8G yaml weights YOLOX L D910x8-G 640 MS COCO 2017 49.2 54.2M 155.6G yaml weights YOLOX X D910x8-G 640 MS COCO 2017 51.6 99.1M 281.9G yaml weights YOLOX Darknet53 D910x8-G 640 MS COCO 2017 47.7 63.7M 185.3G yaml weights"},{"location":"modelzoo/#depoly-inference","title":"Depoly inference","text":"
  • See support list
"},{"location":"modelzoo/#notes","title":"Notes","text":"
  • Context: Training context denoted as {device}x{pieces}-{MS mode}, where mindspore mode can be G - graph mode or F - pynative mode with ms function. For example, D910x8-G is for training on 8 pieces of Ascend 910 NPU using graph mode.
  • Box mAP: Accuracy reported on the validation set.
"},{"location":"how_to_guides/write_a_new_model/","title":"Write A New Model","text":"

coming soon.

"},{"location":"notes/changelog/","title":"Change Log","text":"

Coming soon.

"},{"location":"notes/code_of_conduct/","title":"Code of Conduct","text":"

Coming soon.

"},{"location":"notes/contributing/","title":"Contributing","text":""},{"location":"notes/contributing/#mindyolo-contributing-guidelines","title":"MindYOLO contributing guidelines","text":"
  • MindYOLO contributing guidelines
    • Contributor License Agreement
    • Getting Started
    • Contribution workflow
      • Code style
      • Fork-Pull development model
      • Report issues
      • Propose PRs
"},{"location":"notes/contributing/#contributor-license-agreement","title":"Contributor License Agreement","text":"

It's required to sign CLA before your first code submission to MindYOLO community.

For individual contributor, please refer to ICLA online document for the detailed information.

"},{"location":"notes/contributing/#getting-started","title":"Getting Started","text":"
  • Fork the repository on Github.
  • Read the README.md.
"},{"location":"notes/contributing/#contribution-workflow","title":"Contribution Workflow","text":""},{"location":"notes/contributing/#code-style","title":"Code style","text":"

Please follow this style to make MindYOLO easy to review, maintain and develop.

  • Coding guidelines

    The Python coding style suggested by Python PEP 8 Coding Style and C++ coding style suggested by Google C++ Coding Guidelines are used in MindYOLO community. The CppLint, CppCheck, CMakeLint, CodeSpell, Lizard, ShellCheck and PyLint are used to check the format of codes, installing these plugins in your IDE is recommended.

  • Unittest guidelines

    The Python unittest style suggested by pytest and C++ unittest style suggested by Googletest Primer are used in MindYOLO community. The design intent of a testcase should be reflected by its name or comment.

  • Refactoring guidelines

    We encourage developers to refactor our code to eliminate the code smell. All codes should conform to needs to the coding style and testing style, and refactoring codes are no exception. Lizard threshold for nloc (lines of code without comments) is 100 and for cnc (cyclomatic complexity number) is 20, when you receive a Lizard warning, you have to refactor the code you want to merge.

  • Document guidelines

    We use MarkdownLint to check the format of markdown documents. MindYOLO CI modifies the following rules based on the default configuration. - MD007 (unordered list indentation): The indent parameter is set to 4, indicating that all content in the unordered list needs to be indented using four spaces. - MD009 (spaces at the line end): The br_spaces parameter is set to 2, indicating that there can be 0 or 2 spaces at the end of a line. - MD029 (sequence numbers of an ordered list): The style parameter is set to ordered, indicating that the sequence numbers of the ordered list are in ascending order.

    For details, please refer to RULES.

"},{"location":"notes/contributing/#fork-pull-development-model","title":"Fork-Pull development model","text":"
  • Fork MindYOLO repository

    Before submitting code to MindYOLO project, please make sure that this project have been forked to your own repository. It means that there will be parallel development between MindYOLO repository and your own repository, so be careful to avoid the inconsistency between them.

  • Clone the remote repository

    If you want to download the code to the local machine, git is the best way:

    # For GitHub\ngit clone https://github.com/{insert_your_forked_repo}/mindyolo.git\ngit remote add upstream https://github.com/mindspore-lab/mindyolo.git\n
  • Develop code locally

    To avoid inconsistency between multiple branches, checking out to a new branch is SUGGESTED:

    git checkout -b {new_branch_name} origin/master\n

    Taking the master branch as an example, MindYOLO may create version branches and downstream development branches as needed, please fix bugs upstream first. Then you can change the code arbitrarily.

  • Push the code to the remote repository

    After updating the code, you should push the update in the formal way:

    git add .\ngit status # Check the update status\ngit commit -m \"Your commit title\"\ngit commit -s --amend #Add the concrete description of your commit\ngit push origin {new_branch_name}\n
  • Pull a request to MindYOLO repository

    In the last step, you need to open a pull request comparing your new branch and the MindYOLO master branch. After finishing the pull request, the Jenkins CI will be automatically set up for build testing. Your pull request should be merged into the upstream master branch as soon as possible to reduce the risk of merging.

"},{"location":"notes/contributing/#report-issues","title":"Report issues","text":"

A great way to contribute to the project is to send a detailed report when you encounter an issue. We always appreciate a well-written, thorough bug report, and will thank you for it!

When reporting issues, refer to this format:

  • What version of env (MindSpore, os, python, MindYOLO etc) are you using?
  • Is this a BUG REPORT or FEATURE REQUEST?
  • What kind of issue is, add the labels to highlight it on the issue dashboard.
  • What happened?
  • What you expected to happen?
  • How to reproduce it?(as minimally and precisely as possible)
  • Special notes for your reviewers?

Issues advisory:

  • If you find an unclosed issue, which is exactly what you are going to solve, please put some comments on that issue to tell others you would be in charge of it.
  • If an issue is opened for a while, it's recommended for contributors to precheck before working on solving that issue.
  • If you resolve an issue which is reported by yourself, it's also required to let others know before closing that issue.
  • If you want the issue to be responded as quickly as possible, please try to label it, you can find kinds of labels on Label List
"},{"location":"notes/contributing/#propose-prs","title":"Propose PRs","text":"
  • Raise your idea as an issue on GitHub
  • If it is a new feature that needs lots of design details, a design proposal should also be submitted.
  • After reaching consensus in the issue discussions and design proposal reviews, complete the development on the forked repo and submit a PR.
  • No PR may be merged until it receives 2+ LGTM from approvers. Please NOTICE that an approver is NOT allowed to add LGTM on their own PR.
  • After PR is sufficiently discussed, it will get merged, abandoned or rejected depending on the outcome of the discussion.

PRs advisory:

  • Any irrelevant changes should be avoided.
  • Make sure your commit history being ordered.
  • Always keep your branch up with the master branch.
  • For bug-fix PRs, make sure all related issues being linked.
"},{"location":"notes/faq/","title":"FAQ","text":"

Coming soon.

"},{"location":"reference/data/","title":"Data","text":""},{"location":"reference/data/#comming-soon","title":"comming soon","text":""},{"location":"reference/loss/","title":"Loss","text":""},{"location":"reference/loss/#loss-factory","title":"Loss Factory","text":""},{"location":"reference/models/","title":"Models","text":""},{"location":"reference/models/#create-model","title":"Create Model","text":""},{"location":"reference/models/#mindyolo.models.model_factory.create_model","title":"mindyolo.models.model_factory.create_model(model_name, model_cfg=None, in_channels=3, num_classes=80, checkpoint_path='', **kwargs)","text":"Source code in mindyolo/models/model_factory.py
def create_model(\n    model_name: str,\n    model_cfg: dict = None,\n    in_channels: int = 3,\n    num_classes: int = 80,\n    checkpoint_path: str = \"\",\n    **kwargs,\n):\n    model_args = dict(cfg=model_cfg, num_classes=num_classes, in_channels=in_channels)\n    kwargs = {k: v for k, v in kwargs.items() if v is not None}\n\n    if not is_model(model_name):\n        raise RuntimeError(f\"Unknown model {model_name}\")\n\n    create_fn = model_entrypoint(model_name)\n    model = create_fn(**model_args, **kwargs)\n\n    if checkpoint_path:\n        assert os.path.isfile(checkpoint_path) and checkpoint_path.endswith(\n            \".ckpt\"\n        ), f\"[{checkpoint_path}] not a ckpt file.\"\n        checkpoint_param = load_checkpoint(checkpoint_path)\n        load_param_into_net(model, checkpoint_param)\n        logger.info(f\"Load checkpoint from [{checkpoint_path}] success.\")\n\n    return model\n
"},{"location":"reference/models/#yolov3_head","title":"yolov3_head","text":""},{"location":"reference/models/#yolov4_head","title":"yolov4_head","text":""},{"location":"reference/models/#yolov5_head","title":"yolov5_head","text":""},{"location":"reference/models/#yolov7_head","title":"yolov7_head","text":""},{"location":"reference/models/#yolov8_head","title":"yolov8_head","text":""},{"location":"reference/models/#yolox_head","title":"yolox_head","text":""},{"location":"reference/models/#initializer","title":"initializer","text":""},{"location":"reference/models/#focal_loss","title":"focal_loss","text":""},{"location":"reference/models/#iou_loss","title":"iou_loss","text":""},{"location":"reference/models/#label_assignment","title":"label_assignment","text":""},{"location":"reference/models/#loss_factory","title":"loss_factory","text":""},{"location":"reference/models/#yolov3_loss","title":"yolov3_loss","text":""},{"location":"reference/models/#yolov4_loss","title":"yolov4_loss","text":""},{"location":"reference/models/#yolov5_loss","title":"yolov5_loss","text":""},{"location":"reference/models/#yolov7_loss","title":"yolov7_loss","text":""},{"location":"reference/models/#yolov8_loss","title":"yolov8_loss","text":""},{"location":"reference/models/#yolox_loss","title":"yolox_loss","text":""},{"location":"reference/models/#yolov3","title":"yolov3","text":""},{"location":"reference/models/#yolov4","title":"yolov4","text":""},{"location":"reference/models/#yolov5","title":"yolov5","text":""},{"location":"reference/models/#yolov7","title":"yolov7","text":""},{"location":"reference/models/#yolov8","title":"yolov8","text":""},{"location":"reference/models/#yolox","title":"yolox","text":""},{"location":"tutorials/configuration/","title":"Configuration","text":""},{"location":"tutorials/configuration/#_1","title":"\u914d\u7f6e","text":"

MindYOLO\u5957\u4ef6\u540c\u65f6\u652f\u6301yaml\u6587\u4ef6\u53c2\u6570\u548c\u547d\u4ee4\u884c\u53c2\u6570\u89e3\u6790\uff0c\u5e76\u5c06\u76f8\u5bf9\u56fa\u5b9a\u3001\u4e0e\u6a21\u578b\u5f3a\u76f8\u5173\u3001\u8f83\u4e3a\u590d\u6742\u6216\u8005\u542b\u6709\u5d4c\u5957\u7ed3\u6784\u7684\u53c2\u6570\u7f16\u5199\u6210yaml\u6587\u4ef6\uff0c\u9700\u6839\u636e\u5b9e\u9645\u5e94\u7528\u573a\u666f\u66f4\u6539\u6216\u8005\u8f83\u4e3a\u7b80\u5355\u7684\u53c2\u6570\u5219\u901a\u8fc7\u547d\u4ee4\u884c\u4f20\u5165\u3002

\u4e0b\u9762\u4ee5yolov3\u4e3a\u4f8b\uff0c\u89e3\u91ca\u5982\u4f55\u914d\u7f6e\u76f8\u5e94\u7684\u53c2\u6570\u3002

"},{"location":"tutorials/configuration/#_2","title":"\u53c2\u6570\u7ee7\u627f\u5173\u7cfb","text":"

\u53c2\u6570\u4f18\u5148\u7ea7\u7531\u9ad8\u5230\u4f4e\u5982\u4e0b\uff0c\u51fa\u73b0\u540c\u540d\u53c2\u6570\u65f6\uff0c\u4f4e\u4f18\u5148\u7ea7\u53c2\u6570\u4f1a\u88ab\u9ad8\u4f18\u5148\u7ea7\u53c2\u6570\u8986\u76d6

  • \u7528\u6237\u547d\u4ee4\u884c\u4f20\u5165\u53c2\u6570
  • python\u6267\u884cpy\u6587\u4ef6\u4e2dparser\u7684\u9ed8\u8ba4\u53c2\u6570
  • \u547d\u4ee4\u884c\u4f20\u5165config\u53c2\u6570\u5bf9\u5e94\u7684yaml\u6587\u4ef6\u53c2\u6570
  • \u547d\u4ee4\u884c\u4f20\u5165config\u53c2\u6570\u5bf9\u5e94\u7684yaml\u6587\u4ef6\u4e2d__BASE__\u53c2\u6570\u4e2d\u5305\u542b\u7684yaml\u6587\u4ef6\u53c2\u6570\uff0c\u4f8b\u5982yolov3.yaml\u542b\u6709\u5982\u4e0b\u53c2\u6570\uff1a
    __BASE__: [\n'../coco.yaml',\n'./hyp.scratch.yaml',\n]\n
"},{"location":"tutorials/configuration/#_3","title":"\u57fa\u7840\u53c2\u6570","text":""},{"location":"tutorials/configuration/#_4","title":"\u53c2\u6570\u8bf4\u660e","text":"
  • device_target: \u6240\u7528\u8bbe\u5907\uff0cAscend/GPU/CPU
  • save_dir: \u8fd0\u884c\u7ed3\u679c\u4fdd\u5b58\u8def\u5f84\uff0c\u9ed8\u8ba4\u4e3a./runs
  • log_interval: \u6253\u5370\u65e5\u5fd7step\u95f4\u9694\uff0c\u9ed8\u8ba4\u4e3a100
  • is_parallel: \u662f\u5426\u5206\u5e03\u5f0f\u8bad\u7ec3\uff0c\u9ed8\u8ba4\u4e3aFalse
  • ms_mode: \u4f7f\u7528\u9759\u6001\u56fe\u6a21\u5f0f(0)\u6216\u52a8\u6001\u56fe\u6a21\u5f0f(1)\uff0c\u9ed8\u8ba4\u4e3a0\u3002
  • config: yaml\u914d\u7f6e\u6587\u4ef6\u8def\u5f84
  • per_batch_size: \u6bcf\u5f20\u5361batch size\uff0c\u9ed8\u8ba4\u4e3a32
  • epochs: \u8bad\u7ec3epoch\u6570\uff0c\u9ed8\u8ba4\u4e3a300
  • ...
"},{"location":"tutorials/configuration/#parse","title":"parse\u53c2\u6570\u8bbe\u7f6e","text":"

\u8be5\u90e8\u5206\u53c2\u6570\u901a\u5e38\u7531\u547d\u4ee4\u884c\u4f20\u5165\uff0c\u793a\u4f8b\u5982\u4e0b\uff1a

mpirun --allow-run-as-root -n 8 python train.py --config ./configs/yolov7/yolov7.yaml  --is_parallel True --log_interval 50\n
"},{"location":"tutorials/configuration/#_5","title":"\u6570\u636e\u96c6","text":""},{"location":"tutorials/configuration/#_6","title":"\u53c2\u6570\u8bf4\u660e","text":"
  • dataset_name: \u6570\u636e\u96c6\u540d\u79f0
  • train_set: \u8bad\u7ec3\u96c6\u6240\u5728\u8def\u5f84
  • val_set: \u9a8c\u8bc1\u96c6\u6240\u5728\u8def\u5f84
  • test_set: \u6d4b\u8bd5\u96c6\u6240\u5728\u8def\u5f84
  • nc: \u6570\u636e\u96c6\u7c7b\u522b\u6570
  • names: \u7c7b\u522b\u540d\u79f0
  • ...
"},{"location":"tutorials/configuration/#yaml","title":"yaml\u6587\u4ef6\u6837\u4f8b","text":"

\u8be5\u90e8\u5206\u53c2\u6570\u5728configs/coco.yaml\u4e2d\u5b9a\u4e49\uff0c\u901a\u5e38\u9700\u4fee\u6539\u5176\u4e2d\u7684\u6570\u636e\u96c6\u8def\u5f84

```yaml data: dataset_name: coco

train_set: ./coco/train2017.txt # 118287 images val_set: ./coco/val2017.txt # 5000 images test_set: ./coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

nc: 80

# class names names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' ] ```

"},{"location":"tutorials/configuration/#_7","title":"\u6570\u636e\u589e\u5f3a","text":""},{"location":"tutorials/configuration/#_8","title":"\u53c2\u6570\u8bf4\u660e","text":"
  • num_parallel_workers: \u8bfb\u53d6\u6570\u636e\u7684\u5de5\u4f5c\u8fdb\u7a0b\u6570
  • train_transformers: \u8bad\u7ec3\u8fc7\u7a0b\u6570\u636e\u589e\u5f3a
  • test_transformers: \u9a8c\u8bc1\u8fc7\u7a0b\u6570\u636e\u589e\u5f3a
  • ...
"},{"location":"tutorials/configuration/#yaml_1","title":"yaml\u6587\u4ef6\u6837\u4f8b","text":"

\u8be5\u90e8\u5206\u53c2\u6570\u5728configs/yolov3/hyp.scratch.yaml\u4e2d\u5b9a\u4e49\uff0c\u5176\u4e2dtrain_transformers\u548ctest_transformers\u5747\u4e3a\u7531\u5b57\u5178\u7ec4\u6210\u7684\u5217\u8868\uff0c\u5404\u5b57\u5178\u5305\u542b\u6570\u636e\u589e\u5f3a\u64cd\u4f5c\u540d\u79f0\u3001\u53d1\u751f\u6982\u7387\u53ca\u8be5\u589e\u5f3a\u65b9\u6cd5\u76f8\u5173\u7684\u53c2\u6570

```yaml data: num_parallel_workers: 4

train_transforms: - { func_name: mosaic, prob: 1.0, mosaic9_prob: 0.0, translate: 0.1, scale: 0.9 } - { func_name: mixup, prob: 0.1, alpha: 8.0, beta: 8.0, needed_mosaic: True } - { func_name: hsv_augment, prob: 1.0, hgain: 0.015, sgain: 0.7, vgain: 0.4 } - { func_name: label_norm, xyxy2xywh_: True } - { func_name: albumentations } - { func_name: fliplr, prob: 0.5 } - { func_name: label_pad, padding_size: 160, padding_value: -1 } - { func_name: image_norm, scale: 255. } - { func_name: image_transpose, bgr2rgb: True, hwc2chw: True }

test_transforms: - { func_name: letterbox, scaleup: False } - { func_name: label_norm, xyxy2xywh_: True } - { func_name: label_pad, padding_size: 160, padding_value: -1 } - { func_name: image_norm, scale: 255. } - { func_name: image_transpose, bgr2rgb: True, hwc2chw: True } ```

"},{"location":"tutorials/configuration/#_9","title":"\u6a21\u578b","text":""},{"location":"tutorials/configuration/#_10","title":"\u53c2\u6570\u8bf4\u660e","text":"
  • model_name: \u6a21\u578b\u540d\u79f0
  • depth_multiple: \u6a21\u578b\u6df1\u5ea6\u56e0\u5b50
  • width_multiple: \u6a21\u578b\u5bbd\u5ea6\u56e0\u5b50
  • stride: \u7279\u5f81\u56fe\u4e0b\u91c7\u6837\u500d\u6570
  • anchors: \u9884\u8bbe\u951a\u6846
  • backbone: \u6a21\u578b\u9aa8\u5e72\u7f51\u7edc
  • head: \u6a21\u578b\u68c0\u6d4b\u5934
"},{"location":"tutorials/configuration/#yaml_2","title":"yaml\u6587\u4ef6\u6837\u4f8b","text":"

\u8be5\u90e8\u5206\u53c2\u6570\u5728configs/yolov3/yolov3.yaml\u4e2d\u5b9a\u4e49\uff0c\u6839\u636ebackbone\u548chead\u53c2\u6570\u8fdb\u884c\u7f51\u7edc\u6784\u5efa\uff0c\u53c2\u6570\u4ee5\u5d4c\u5957\u5217\u8868\u7684\u5f62\u5f0f\u5448\u73b0\uff0c\u6bcf\u884c\u4ee3\u8868\u4e00\u5c42\u6a21\u5757\uff0c\u5305\u542b4\u4e2a\u53c2\u6570\uff0c\u5206\u522b\u662f \u8f93\u5165\u5c42\u7f16\u53f7(-1\u4ee3\u8868\u4e0a\u4e00\u5c42)\u3001\u6a21\u5757\u91cd\u590d\u6b21\u6570\u3001\u6a21\u5757\u540d\u79f0\u548c\u6a21\u5757\u76f8\u5e94\u53c2\u6570\u3002\u7528\u6237\u4e5f\u53ef\u4ee5\u4e0d\u501f\u52a9yaml\u6587\u4ef6\u800c\u76f4\u63a5\u5728py\u6587\u4ef6\u4e2d\u5b9a\u4e49\u548c\u6ce8\u518c\u7f51\u7edc\u3002 ```yaml network: model_name: yolov3

depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple stride: [8, 16, 32] anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32

# darknet53 backbone backbone: # [from, number, module, args] [[-1, 1, ConvNormAct, [32, 3, 1]], # 0 [-1, 1, ConvNormAct, [64, 3, 2]], # 1-P1/2 [-1, 1, Bottleneck, [64]], [-1, 1, ConvNormAct, [128, 3, 2]], # 3-P2/4 [-1, 2, Bottleneck, [128]], [-1, 1, ConvNormAct, [256, 3, 2]], # 5-P3/8 [-1, 8, Bottleneck, [256]], [-1, 1, ConvNormAct, [512, 3, 2]], # 7-P4/16 [-1, 8, Bottleneck, [512]], [-1, 1, ConvNormAct, [1024, 3, 2]], # 9-P5/32 [-1, 4, Bottleneck, [1024]], # 10 ]

# YOLOv3 head head: [[-1, 1, Bottleneck, [1024, False]], [-1, 1, ConvNormAct, [512, 1, 1]], [-1, 1, ConvNormAct, [1024, 3, 1]], [-1, 1, ConvNormAct, [512, 1, 1]], [-1, 1, ConvNormAct, [1024, 3, 1]], # 15 (P5/32-large)

 [-2, 1, ConvNormAct, [256, 1, 1]],\n [-1, 1, Upsample, [None, 2, 'nearest']],\n [[-1, 8], 1, Concat, [1]],  # cat backbone P4\n [-1, 1, Bottleneck, [512, False]],\n [-1, 1, Bottleneck, [512, False]],\n [-1, 1, ConvNormAct, [256, 1, 1]],\n [-1, 1, ConvNormAct, [512, 3, 1]],  # 22 (P4/16-medium)\n\n [-2, 1, ConvNormAct, [128, 1, 1]],\n [-1, 1, Upsample, [None, 2, 'nearest']],\n [[-1, 6], 1, Concat, [1]],  # cat backbone P3\n [-1, 1, Bottleneck, [256, False]],\n [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)\n\n [[27, 22, 15], 1, YOLOv3Head, [nc, anchors, stride]],   # Detect(P3, P4, P5)\n]\n

```

"},{"location":"tutorials/configuration/#_11","title":"\u635f\u5931\u51fd\u6570","text":""},{"location":"tutorials/configuration/#_12","title":"\u53c2\u6570\u8bf4\u660e","text":"
  • name: \u635f\u5931\u51fd\u6570\u540d\u79f0
  • box: box\u635f\u5931\u6743\u91cd
  • cls: class\u635f\u5931\u6743\u91cd
  • cls_pw: class\u635f\u5931\u6b63\u6837\u672c\u6743\u91cd
  • obj: object\u635f\u5931\u6743\u91cd
  • obj_pw: object\u635f\u5931\u6b63\u6837\u672c\u6743\u91cd
  • fl_gamma: focal loss gamma
  • anchor_t: anchor shape\u6bd4\u4f8b\u9608\u503c
  • label_smoothing: \u6807\u7b7e\u5e73\u6ed1\u503c
"},{"location":"tutorials/configuration/#yaml_3","title":"yaml\u6587\u4ef6\u6837\u4f8b","text":"

\u8be5\u90e8\u5206\u53c2\u6570\u5728configs/yolov3/hyp.scratch.yaml\u4e2d\u5b9a\u4e49

yaml loss: name: YOLOv7Loss box: 0.05 # box loss gain cls: 0.5 # cls loss gain cls_pw: 1.0 # cls BCELoss positive_weight obj: 1.0 # obj loss gain (scale with pixels) obj_pw: 1.0 # obj BCELoss positive_weight fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) anchor_t: 4.0 # anchor-multiple threshold label_smoothing: 0.0 # label smoothing epsilon

"},{"location":"tutorials/configuration/#_13","title":"\u4f18\u5316\u5668","text":""},{"location":"tutorials/configuration/#_14","title":"\u53c2\u6570\u8bf4\u660e","text":"
  • optimizer: \u4f18\u5316\u5668\u540d\u79f0\u3002
  • lr_init: \u5b66\u4e60\u7387\u521d\u59cb\u503c
  • warmup_epochs: warmup epoch\u6570
  • warmup_momentum: warmup momentum\u521d\u59cb\u503c
  • warmup_bias_lr: warmup bias\u5b66\u4e60\u7387\u521d\u59cb\u503c
  • min_warmup_step: \u6700\u5c0fwarmup step\u6570
  • group_param: \u53c2\u6570\u5206\u7ec4\u7b56\u7565
  • gp_weight_decay: \u5206\u7ec4\u53c2\u6570\u6743\u91cd\u8870\u51cf\u7cfb\u6570
  • start_factor: \u521d\u59cb\u5b66\u4e60\u7387\u56e0\u6570
  • end_factor: \u7ed3\u675f\u5b66\u4e60\u7387\u56e0\u6570
  • momentum\uff1a\u79fb\u52a8\u5e73\u5747\u7684\u52a8\u91cf
  • loss_scale\uff1aloss\u7f29\u653e\u7cfb\u6570
  • nesterov\uff1a\u662f\u5426\u4f7f\u7528Nesterov Accelerated Gradient (NAG)\u7b97\u6cd5\u66f4\u65b0\u68af\u5ea6\u3002
"},{"location":"tutorials/configuration/#yaml_4","title":"yaml\u6587\u4ef6\u6837\u4f8b","text":"

\u8be5\u90e8\u5206\u53c2\u6570\u5728configs/yolov3/hyp.scratch.yaml\u4e2d\u5b9a\u4e49\uff0c\u5982\u4e0b\u793a\u4f8b\u4e2d\u7ecf\u8fc7warmup\u9636\u6bb5\u540e\u7684\u521d\u59cb\u5b66\u4e60\u7387\u4e3alr_init * start_factor = 0.01 * 1.0 = 0.01, \u6700\u7ec8\u5b66\u4e60\u7387\u4e3alr_init * end_factor = 0.01 * 0.01 = 0.0001

yaml optimizer: optimizer: momentum lr_init: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) momentum: 0.937 # SGD momentum/Adam beta1 nesterov: True # update gradients with NAG(Nesterov Accelerated Gradient) algorithm loss_scale: 1.0 # loss scale for optimizer warmup_epochs: 3 # warmup epochs (fractions ok) warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr: 0.1 # warmup initial bias lr min_warmup_step: 1000 # minimum warmup step group_param: yolov7 # group param strategy gp_weight_decay: 0.0005 # group param weight decay 5e-4 start_factor: 1.0 end_factor: 0.01

"},{"location":"tutorials/deployment/","title":"Deployment","text":""},{"location":"tutorials/deployment/#mindyolo","title":"MindYOLO\u90e8\u7f72","text":""},{"location":"tutorials/deployment/#_1","title":"\u4f9d\u8d56","text":"
pip install -r requirements.txt\n
"},{"location":"tutorials/deployment/#mindspore-lite","title":"MindSpore Lite\u73af\u5883\u51c6\u5907","text":"

\u53c2\u8003\uff1aLite\u73af\u5883\u914d\u7f6e \u6ce8\u610f\uff1aMindSpore Lite\u9002\u914d\u7684python\u73af\u5883\u4e3a3.7\uff0c\u8bf7\u5728\u5b89\u88c5Lite\u524d\u51c6\u5907\u597dpython3.7\u7684\u73af\u5883 1. \u6839\u636e\u73af\u5883\uff0c\u4e0b\u8f7d\u914d\u5957\u7684tar.gz\u5305\u548cwhl\u5305 2. \u89e3\u538btar.gz\u5305\u5e76\u5b89\u88c5\u5bf9\u5e94\u7248\u672c\u7684whl\u5305

tar -zxvf mindspore_lite-2.0.0a0-cp37-cp37m-{os}_{platform}_64.tar.gz\npip install mindspore_lite-2.0.0a0-cp37-cp37m-{os}_{platform}_64.whl\n
3. \u914d\u7f6eLite\u7684\u73af\u5883\u53d8\u91cf LITE_HOME\u4e3atar.gz\u89e3\u538b\u51fa\u7684\u6587\u4ef6\u5939\u8def\u5f84\uff0c\u63a8\u8350\u4f7f\u7528\u7edd\u5bf9\u8def\u5f84
export LITE_HOME=/path/to/mindspore-lite-{version}-{os}-{platform}\nexport LD_LIBRARY_PATH=$LITE_HOME/runtime/lib:$LITE_HOME/tools/converter/lib:$LD_LIBRARY_PATH\nexport PATH=$LITE_HOME/tools/converter/converter:$LITE_HOME/tools/benchmark:$PATH\n

"},{"location":"tutorials/deployment/#_2","title":"\u5feb\u901f\u5f00\u59cb","text":""},{"location":"tutorials/deployment/#_3","title":"\u6a21\u578b\u8f6c\u6362","text":"

ckpt\u6a21\u578b\u8f6c\u4e3amindir\u6a21\u578b\uff0c\u6b64\u6b65\u9aa4\u53ef\u5728CPU/Ascend910\u4e0a\u8fd0\u884c

python ./deploy/export.py --config ./path_to_config/model.yaml --weight ./path_to_ckpt/weight.ckpt --per_batch_size 1 --file_format MINDIR --device_target [CPU/Ascend]\ne.g.\n# \u5728CPU\u4e0a\u8fd0\u884c\npython ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target CPU\n# \u5728Ascend\u4e0a\u8fd0\u884c\npython ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target Ascend\n

"},{"location":"tutorials/deployment/#lite-test","title":"Lite Test","text":"
python deploy/test.py --model_type Lite --model_path ./path_to_mindir/weight.mindir --config ./path_to_config/yolo.yaml\ne.g.\npython deploy/test.py --model_type Lite --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml\n
"},{"location":"tutorials/deployment/#lite-predict","title":"Lite Predict","text":"
python ./deploy/predict.py --model_type Lite --model_path ./path_to_mindir/weight.mindir --config ./path_to_config/yolo.yaml --image_path ./path_to_image/image.jpg\ne.g.\npython deploy/predict.py --model_type Lite --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml --image_path ./coco/image/val2017/image.jpg\n
"},{"location":"tutorials/deployment/#_4","title":"\u811a\u672c\u8bf4\u660e","text":"
  • predict.py \u652f\u6301\u5355\u5f20\u56fe\u7247\u63a8\u7406
  • test.py \u652f\u6301COCO\u6570\u636e\u96c6\u63a8\u7406
"},{"location":"tutorials/deployment/#mindx","title":"MindX\u90e8\u7f72","text":"

\u67e5\u770b MINDX

"},{"location":"tutorials/deployment/#_5","title":"\u6807\u51c6\u548c\u652f\u6301\u7684\u6a21\u578b\u5e93","text":"
  • YOLOv7
  • YOLOv5
  • YOLOv3
  • YOLOv8
  • YOLOv4
  • YOLOX
Name Scale Context ImageSize Dataset Box mAP (%) Params FLOPs Recipe Download YOLOv8 N D310x1-G 640 MS COCO 2017 37.2 3.2M 8.7G yaml ckpt mindir YOLOv8 S D310x1-G 640 MS COCO 2017 44.6 11.2M 28.6G yaml ckpt mindir YOLOv8 M D310x1-G 640 MS COCO 2017 50.5 25.9M 78.9G yaml ckpt mindir YOLOv8 L D310x1-G 640 MS COCO 2017 52.8 43.7M 165.2G yaml ckpt mindir YOLOv8 X D310x1-G 640 MS COCO 2017 53.7 68.2M 257.8G yaml ckpt mindir YOLOv7 Tiny D310x1-G 640 MS COCO 2017 37.5 6.2M 13.8G yaml ckpt mindir YOLOv7 L D310x1-G 640 MS COCO 2017 50.8 36.9M 104.7G yaml ckpt mindir YOLOv7 X D310x1-G 640 MS COCO 2017 52.4 71.3M 189.9G yaml ckpt mindir YOLOv5 N D310x1-G 640 MS COCO 2017 27.3 1.9M 4.5G yaml ckpt mindir YOLOv5 S D310x1-G 640 MS COCO 2017 37.6 7.2M 16.5G yaml ckpt mindir YOLOv5 M D310x1-G 640 MS COCO 2017 44.9 21.2M 49.0G yaml ckpt mindir YOLOv5 L D310x1-G 640 MS COCO 2017 48.5 46.5M 109.1G yaml ckpt mindir YOLOv5 X D310x1-G 640 MS COCO 2017 50.5 86.7M 205.7G yaml ckpt mindir YOLOv4 CSPDarknet53 D310x1-G 608 MS COCO 2017 45.4 27.6M 52G yaml ckpt mindir YOLOv4 CSPDarknet53(silu) D310x1-G 640 MS COCO 2017 45.8 27.6M 52G yaml ckpt mindir YOLOv3 Darknet53 D310x1-G 640 MS COCO 2017 45.5 61.9M 156.4G yaml ckpt mindir YOLOX N D310x1-G 416 MS COCO 2017 24.1 0.9M 1.1G yaml ckpt mindir YOLOX Tiny D310x1-G 416 MS COCO 2017 33.3 5.1M 6.5G yaml ckpt mindir YOLOX S D310x1-G 640 MS COCO 2017 40.7 9.0M 26.8G yaml ckpt mindir YOLOX M D310x1-G 640 MS COCO 2017 46.7 25.3M 73.8G yaml ckpt mindir YOLOX L D310x1-G 640 MS COCO 2017 49.2 54.2M 155.6G yaml ckpt mindir YOLOX X D310x1-G 640 MS COCO 2017 51.6 99.1M 281.9G yaml ckpt mindir YOLOX Darknet53 D310x1-G 640 MS COCO 2017 47.7 63.7M 185.3G yaml ckpt mindir"},{"location":"tutorials/finetune/","title":"Finetune","text":""},{"location":"tutorials/finetune/#finetune_1","title":"\u81ea\u5b9a\u4e49\u6570\u636e\u96c6finetune\u6d41\u7a0b","text":"

\u672c\u6587\u4ee5\u5b89\u5168\u5e3d\u4f69\u6234\u68c0\u6d4b\u6570\u636e\u96c6(SHWD)\u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u81ea\u5b9a\u4e49\u6570\u636e\u96c6\u5728MindYOLO\u4e0a\u8fdb\u884cfinetune\u7684\u4e3b\u8981\u6d41\u7a0b\u3002

"},{"location":"tutorials/finetune/#_1","title":"\u6570\u636e\u96c6\u683c\u5f0f\u8f6c\u6362","text":"

SHWD\u6570\u636e\u96c6\u91c7\u7528voc\u683c\u5f0f\u7684\u6570\u636e\u6807\u6ce8\uff0c\u5176\u6587\u4ef6\u76ee\u5f55\u5982\u4e0b\u6240\u793a\uff1a

             ROOT_DIR\n                \u251c\u2500\u2500 Annotations\n                \u2502        \u251c\u2500\u2500 000000.xml\n                \u2502        \u2514\u2500\u2500 000002.xml\n                \u251c\u2500\u2500 ImageSets\n                \u2502       \u2514\u2500\u2500 Main\n                \u2502             \u251c\u2500\u2500 test.txt\n                \u2502             \u251c\u2500\u2500 train.txt\n                \u2502             \u251c\u2500\u2500 trainval.txt\n                \u2502             \u2514\u2500\u2500 val.txt\n                \u2514\u2500\u2500 JPEGImages\n                        \u251c\u2500\u2500 000000.jpg\n                        \u2514\u2500\u2500 000002.jpg\n
\u5176\u4e2d\uff0cImageSets/Main\u6587\u4ef6\u4e0b\u7684txt\u6587\u4ef6\u4e2d\u6bcf\u884c\u4ee3\u8868\u76f8\u5e94\u5b50\u96c6\u4e2d\u5355\u5f20\u56fe\u7247\u4e0d\u542b\u540e\u7f00\u7684\u6587\u4ef6\u540d\uff0c\u4f8b\u5982\uff1a
000002\n000005\n000019\n000022\n000027\n000034\n

\u7531\u4e8eMindYOLO\u5728\u9a8c\u8bc1\u9636\u6bb5\u9009\u7528\u56fe\u7247\u540d\u79f0\u4f5c\u4e3aimage_id\uff0c\u56e0\u6b64\u56fe\u7247\u540d\u79f0\u53ea\u80fd\u4e3a\u6570\u503c\u7c7b\u578b\uff0c\u800c\u4e0d\u80fd\u4e3a\u5b57\u7b26\u4e32\u7c7b\u578b\uff0c\u8fd8\u9700\u8981\u5bf9\u56fe\u7247\u8fdb\u884c\u6539\u540d\u3002\u5bf9SHWD\u6570\u636e\u96c6\u683c\u5f0f\u7684\u8f6c\u6362\u5305\u542b\u5982\u4e0b\u6b65\u9aa4\uff1a * \u5c06\u56fe\u7247\u590d\u5236\u5230\u76f8\u5e94\u7684\u8def\u5f84\u4e0b\u5e76\u6539\u540d * \u5728\u6839\u76ee\u5f55\u4e0b\u76f8\u5e94\u7684txt\u6587\u4ef6\u4e2d\u5199\u5165\u8be5\u56fe\u7247\u7684\u76f8\u5bf9\u8def\u5f84 * \u89e3\u6790xml\u6587\u4ef6\uff0c\u5728\u76f8\u5e94\u8def\u5f84\u4e0b\u751f\u6210\u5bf9\u5e94\u7684txt\u6807\u6ce8\u6587\u4ef6 * \u9a8c\u8bc1\u96c6\u8fd8\u9700\u751f\u6210\u6700\u7ec8\u7684json\u6587\u4ef6

\u8be6\u7ec6\u5b9e\u73b0\u53ef\u53c2\u8003convert_shwd2yolo.py\u3002\u8fd0\u884c\u65b9\u5f0f\u5982\u4e0b\uff1a

python examples/finetune_SHWD/convert_shwd2yolo.py --root_dir /path_to_shwd/SHWD\n

\u8fd0\u884c\u4ee5\u4e0a\u547d\u4ee4\u5c06\u5728\u4e0d\u6539\u53d8\u539f\u6570\u636e\u96c6\u7684\u524d\u63d0\u4e0b\uff0c\u5728\u540c\u7ea7\u76ee\u5f55\u751f\u6210yolo\u683c\u5f0f\u7684SHWD\u6570\u636e\u96c6\u3002

"},{"location":"tutorials/finetune/#_2","title":"\u9884\u8bad\u7ec3\u6a21\u578b\u6587\u4ef6\u8f6c\u6362","text":"

\u7531\u4e8eSHWD\u6570\u636e\u96c6\u53ea\u67097000+\u5f20\u56fe\u7247\uff0c\u9009\u62e9yolov7-tiny\u8fdb\u884c\u8be5\u6570\u636e\u96c6\u7684\u8bad\u7ec3\uff0c\u53ef\u4e0b\u8f7dMindYOLO\u63d0\u4f9b\u7684\u5728coco\u6570\u636e\u96c6\u4e0a\u8bad\u7ec3\u597d\u7684\u6a21\u578b\u6587\u4ef6\u4f5c\u4e3a\u9884\u8bad\u7ec3\u6a21\u578b\u3002\u7531\u4e8ecoco\u6570\u636e\u96c6\u542b\u670980\u79cd\u7269\u4f53\u7c7b\u522b\uff0cSHWD\u6570\u636e\u96c6\u53ea\u6709\u4e24\u7c7b\uff0c\u6a21\u578b\u7684\u6700\u540e\u4e00\u5c42head\u5c42\u8f93\u51fa\u4e0e\u7c7b\u522b\u6570nc\u6709\u5173\uff0c\u56e0\u6b64\u9700\u5c06\u9884\u8bad\u7ec3\u6a21\u578b\u6587\u4ef6\u7684\u6700\u540e\u4e00\u5c42\u53bb\u6389\uff0c \u53ef\u53c2\u8003convert_yolov7-tiny_pretrain_ckpt.py\u3002\u8fd0\u884c\u65b9\u5f0f\u5982\u4e0b\uff1a

python examples/finetune_SHWD/convert_yolov7-tiny_pretrain_ckpt.py\n
"},{"location":"tutorials/finetune/#finetune_2","title":"\u6a21\u578b\u5fae\u8c03(Finetune)","text":"

\u7b80\u8981\u7684\u8bad\u7ec3\u6d41\u7a0b\u53ef\u53c2\u8003finetune_shwd.py

  • \u5728\u591a\u5361NPU/GPU\u4e0a\u8fdb\u884c\u5206\u5e03\u5f0f\u6a21\u578b\u8bad\u7ec3\uff0c\u4ee58\u5361\u4e3a\u4f8b:
mpirun --allow-run-as-root -n 8 python examples/finetune_SHWD/finetune_shwd.py --config ./examples/finetune_SHWD/yolov7-tiny_shwd.yaml --is_parallel True\n
  • \u5728\u5355\u5361NPU/GPU/CPU\u4e0a\u8bad\u7ec3\u6a21\u578b\uff1a
python examples/finetune_SHWD/finetune_shwd.py --config ./examples/finetune_SHWD/yolov7-tiny_shwd.yaml 

\u6ce8\u610f\uff1a\u76f4\u63a5\u7528yolov7-tiny\u9ed8\u8ba4coco\u53c2\u6570\u5728SHWD\u6570\u636e\u96c6\u4e0a\u8bad\u7ec3\uff0c\u53ef\u53d6\u5f97AP50 87.0\u7684\u7cbe\u5ea6\u3002\u5c06lr_init\u53c2\u6570\u75310.01\u6539\u4e3a0.001\uff0c\u5373\u53ef\u5b9e\u73b0ap50\u4e3a89.2\u7684\u7cbe\u5ea6\u7ed3\u679c\u3002

"},{"location":"tutorials/modelarts/","title":"Modelarts","text":""},{"location":"tutorials/quick_start/","title":"Quick Start","text":""},{"location":"tutorials/quick_start/#getting-started-with-mindyolo","title":"Getting Started with MindYOLO","text":"

This document provides a brief introduction to the usage of built-in command-line tools in MindYOLO.

"},{"location":"tutorials/quick_start/#inference-demo-with-pre-trained-models","title":"Inference Demo with Pre-trained Models","text":"
  1. Pick a model and its config file from the model zoo, such as, ./configs/yolov7/yolov7.yaml.
  2. Download the corresponding pre-trained checkpoint from the model zoo of each model.
  3. To run YOLO object detection with the built-in configs, please run:
# Run with Ascend (By default)\npython demo/predict.py --config ./configs/yolov7/yolov7.yaml --weight=/path_to_ckpt/WEIGHT.ckpt --image_path /path_to_image/IMAGE.jpg\n\n# Run with GPU\npython demo/predict.py --config ./configs/yolov7/yolov7.yaml --weight=/path_to_ckpt/WEIGHT.ckpt --image_path /path_to_image/IMAGE.jpg --device_target=GPU\n

For details of the command line arguments, see demo/predict.py -h or look at its source code to understand their behavior. Some common arguments are: * To run on cpu, modify device_target to CPU. * The results will be saved in ./detect_results

"},{"location":"tutorials/quick_start/#training-evaluation-in-command-line","title":"Training & Evaluation in Command Line","text":"
  • Prepare your dataset in YOLO format. If trained with COCO (YOLO format), prepare it from yolov5 or the darknet.
  coco/\n    {train,val}2017.txt\n    annotations/\n      instances_{train,val}2017.json\n    images/\n      {train,val}2017/\n          00000001.jpg\n          ...\n          # image files that are mentioned in the corresponding train/val2017.txt\n    labels/\n      {train,val}2017/\n          00000001.txt\n          ...\n          # label files that are mentioned in the corresponding train/val2017.txt\n
  • To train a model on 8 NPUs/GPUs:

    mpirun --allow-run-as-root -n 8 python train.py --config ./configs/yolov7/yolov7.yaml  --is_parallel True\n

  • To train a model on 1 NPU/GPU/CPU:

    python train.py --config ./configs/yolov7/yolov7.yaml \n

  • To evaluate a model's performance:

    python test.py --config ./configs/yolov7/yolov7.yaml --weight /path_to_ckpt/WEIGHT.ckpt\n
    Notes: (1) The default hyper-parameter is used for 8-card training, and some parameters need to be adjusted in the case of a single card. (2) The default device is Ascend, and you can modify it by specifying 'device_target' as Ascend/GPU/CPU, as these are currently supported.

  • For more options, see train/test.py -h.
"},{"location":"tutorials/quick_start/#deployment","title":"Deployment","text":"

See here.

"},{"location":"tutorials/quick_start/#to-use-mindyolo-apis-in-your-code","title":"To use MindYOLO APIs in Your Code","text":"

To be supplemented.

"},{"location":"zh/","title":"\u4e3b\u9875","text":""},{"location":"zh/#mindyolo","title":"MindYOLO","text":"

MindYOLO\u662fMindSpore Lab\u5f00\u53d1\u7684AI\u5957\u4ef6\uff0c\u5b9e\u73b0\u4e86\u6700\u5148\u8fdb\u7684YOLO\u7cfb\u5217\u7b97\u6cd5\uff0c\u67e5\u770b\u652f\u6301\u7684\u6a21\u578b\u7b97\u6cd5\u3002

MindYOLO\u4f7f\u7528Python\u8bed\u8a00\u7f16\u5199\uff0c\u57fa\u4e8e MindSpore AI\u6846\u67b6\u5f00\u53d1\u3002

master \u5206\u652f\u914d\u5957 MindSpore 2.0\u3002

"},{"location":"zh/#_1","title":"\u65b0\u7279\u6027","text":"
  • 2023/06/15
  1. \u652f\u6301 YOLOv3/v4/v5/v7/v8/X \u7b496\u4e2a\u6a21\u578b\uff0c\u53d1\u5e03\u4e8623\u4e2a\u6a21\u578bweights\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003 MODEL ZOO\u3002
  2. \u914d\u5957 MindSpore 2.0\u3002
  3. \u652f\u6301 MindSpore lite 2.0 \u63a8\u7406\u3002
  4. \u65b0\u7684\u6559\u7a0b\u6587\u6863\u4e0a\u7ebf\uff01
"},{"location":"zh/#_2","title":"\u57fa\u51c6\u548c\u6a21\u578b\u4ed3\u5e93","text":"

\u67e5\u770b MODEL ZOO.

\u652f\u6301\u7684\u7b97\u6cd5
  • YOLOv8
  • YOLOv7
  • YOLOX
  • YOLOv5
  • YOLOv4
  • YOLOv3
"},{"location":"zh/#_3","title":"\u5b89\u88c5","text":"

\u67e5\u770b INSTALLATION

"},{"location":"zh/#_4","title":"\u5feb\u901f\u5165\u95e8","text":"

\u67e5\u770b GETTING STARTED

"},{"location":"zh/#mindyolo_1","title":"\u4e86\u89e3 MindYOLO \u7684\u66f4\u591a\u4fe1\u606f","text":"

\u656c\u8bf7\u671f\u5f85

"},{"location":"zh/#_5","title":"\u6ce8\u610f","text":"

\u26a0\ufe0f\u5f53\u524d\u7248\u672c\u57fa\u4e8eGRAPH\u7684\u9759\u6001Shape\u3002\u540e\u7eed\u5c06\u6dfb\u52a0PYNATIVE\u7684\u52a8\u6001Shape\u652f\u6301\uff0c\u656c\u8bf7\u671f\u5f85\u3002

"},{"location":"zh/#_6","title":"\u8d21\u732e\u65b9\u5f0f","text":"

\u6211\u4eec\u611f\u8c22\u5f00\u53d1\u8005\u7528\u6237\u7684\u6240\u6709\u8d21\u732e\uff0c\u5305\u62ec\u63d0issue\u548cPR\uff0c\u4e00\u8d77\u8ba9MindYOLO\u53d8\u5f97\u66f4\u597d\u3002

\u8d21\u732e\u6307\u5357\u8bf7\u53c2\u8003CONTRIBUTING.md\u3002

"},{"location":"zh/#_7","title":"\u8bb8\u53ef\u8bc1","text":"

MindYOLO\u9075\u5faaApache License 2.0\u5f00\u6e90\u534f\u8bae\u3002

"},{"location":"zh/#_8","title":"\u81f4\u8c22","text":"

MindYOLO\u662f\u4e00\u4e2a\u6b22\u8fce\u4efb\u4f55\u8d21\u732e\u548c\u53cd\u9988\u7684\u5f00\u6e90\u9879\u76ee\u3002\u6211\u4eec\u5e0c\u671b\u901a\u8fc7\u63d0\u4f9b\u7075\u6d3b\u4e14\u6807\u51c6\u5316\u7684\u5de5\u5177\u5305\u6765\u91cd\u65b0\u5b9e\u73b0\u73b0\u6709\u65b9\u6cd5\u548c\u5f00\u53d1\u65b0\u7684\u5b9e\u65f6\u76ee\u6807\u68c0\u6d4b\u65b9\u6cd5\uff0c\u4ece\u800c\u4e3a\u4e0d\u65ad\u53d1\u5c55\u7684\u7814\u7a76\u793e\u533a\u670d\u52a1\u3002

"},{"location":"zh/#_9","title":"\u5f15\u7528","text":"

\u5982\u679c\u4f60\u89c9\u5f97MindYOLO\u5bf9\u4f60\u7684\u9879\u76ee\u6709\u5e2e\u52a9\uff0c\u8bf7\u8003\u8651\u5f15\u7528\uff1a

@misc{MindSpore Object Detection YOLO 2023,\n    title={{MindSpore Object Detection YOLO}:MindSpore Object Detection YOLO Toolbox and Benchmark},\n    author={MindSpore YOLO Contributors},\n    howpublished = {\\url{https://github.com/mindspore-lab/mindyolo}},\n    year={2023}\n}\n
"},{"location":"zh/installation/","title":"\u5b89\u88c5","text":""},{"location":"zh/installation/#_2","title":"\u4f9d\u8d56","text":"
  • mindspore >= 2.0
  • numpy >= 1.17.0
  • pyyaml >= 5.3
  • openmpi 4.0.3 (\u5206\u5e03\u5f0f\u8bad\u7ec3\u6240\u9700)

\u4e3a\u4e86\u5b89\u88c5python\u76f8\u5173\u5e93\u4f9d\u8d56\uff0c\u53ea\u9700\u8fd0\u884c\uff1a

pip install -r requirements.txt\n

MindSpore\u53ef\u4ee5\u901a\u8fc7\u9075\u5faa\u5b98\u65b9\u6307\u5f15\uff0c\u5728\u4e0d\u540c\u7684\u786c\u4ef6\u5e73\u53f0\u4e0a\u83b7\u5f97\u6700\u4f18\u7684\u5b89\u88c5\u4f53\u9a8c\u3002 \u4e3a\u4e86\u5728\u5206\u5e03\u5f0f\u6a21\u5f0f\u4e0b\u8fd0\u884c\uff0c\u60a8\u8fd8\u9700\u8981\u5b89\u88c5OpenMPI\u3002

\u26a0\ufe0f \u5f53\u524d\u7248\u672c\u4ec5\u652f\u6301Ascend\u5e73\u53f0\uff0cGPU\u4f1a\u5728\u540e\u7eed\u652f\u6301\uff0c\u656c\u8bf7\u671f\u5f85\u3002

"},{"location":"zh/installation/#pypi","title":"PyPI\u6e90\u5b89\u88c5","text":"

MindYOLO \u53d1\u5e03\u4e3a\u4e00\u4e2aPython\u5305\u5e76\u80fd\u591f\u901a\u8fc7pip\u8fdb\u884c\u5b89\u88c5\u3002\u6211\u4eec\u63a8\u8350\u60a8\u5728\u865a\u62df\u73af\u5883\u5b89\u88c5\u4f7f\u7528\u3002 \u6253\u5f00\u7ec8\u7aef\uff0c\u8f93\u5165\u4ee5\u4e0b\u6307\u4ee4\u6765\u5b89\u88c5 MindYOLO:

pip install mindyolo\n
"},{"location":"zh/installation/#_3","title":"\u6e90\u7801\u5b89\u88c5 (\u672a\u7ecf\u6d4b\u8bd5\u7248\u672c)","text":""},{"location":"zh/installation/#from-vsc","title":"from VSC","text":"
pip install git+https://github.com/mindspore-lab/mindyolo.git\n
"},{"location":"zh/installation/#from-local-src","title":"from local src","text":"

\u7531\u4e8e\u672c\u9879\u76ee\u5904\u4e8e\u6d3b\u8dc3\u5f00\u53d1\u9636\u6bb5\uff0c\u5982\u679c\u60a8\u662f\u5f00\u53d1\u8005\u6216\u8005\u8d21\u732e\u8005\uff0c\u8bf7\u4f18\u5148\u9009\u62e9\u6b64\u5b89\u88c5\u65b9\u5f0f\u3002

MindYOLO \u53ef\u4ee5\u5728\u7531 GitHub \u514b\u9686\u4ed3\u5e93\u5230\u672c\u5730\u6587\u4ef6\u5939\u540e\u76f4\u63a5\u4f7f\u7528\u3002 \u8fd9\u5bf9\u4e8e\u60f3\u4f7f\u7528\u6700\u65b0\u7248\u672c\u7684\u5f00\u53d1\u8005\u5341\u5206\u65b9\u4fbf:

git clone https://github.com/mindspore-lab/mindyolo.git\n

\u5728\u514b\u9686\u5230\u672c\u5730\u4e4b\u540e\uff0c\u63a8\u8350\u60a8\u4f7f\u7528\"\u53ef\u7f16\u8f91\"\u6a21\u5f0f\u8fdb\u884c\u5b89\u88c5\uff0c\u8fd9\u6709\u52a9\u4e8e\u89e3\u51b3\u6f5c\u5728\u7684\u6a21\u5757\u5bfc\u5165\u95ee\u9898\u3002

cd mindyolo\npip install -e .\n

\u53e6\u5916, \u6211\u4eec\u63d0\u4f9b\u4e86\u4e00\u4e2a\u53ef\u9009\u7684 fast coco api \u63a5\u53e3\u7528\u4e8e\u63d0\u5347\u9a8c\u8bc1\u8fc7\u7a0b\u7684\u901f\u5ea6\u3002\u4ee3\u7801\u662f\u4ee5C++\u5f62\u5f0f\u63d0\u4f9b\u7684\uff0c\u53ef\u4ee5\u5c1d\u8bd5\u7528\u4ee5\u4e0b\u7684\u547d\u4ee4\u8fdb\u884c\u5b89\u88c5 (\u6b64\u64cd\u4f5c\u662f\u53ef\u9009\u7684) :

cd mindyolo/csrc\nsh build.sh\n
"},{"location":"zh/modelzoo/","title":"\u6a21\u578b\u4ed3\u5e93","text":""},{"location":"zh/how_to_guides/write_a_new_model/","title":"\u6a21\u578b\u7f16\u5199\u6307\u5357","text":"

\u672c\u6587\u6863\u63d0\u4f9bMindYOLO\u7f16\u5199\u81ea\u5b9a\u4e49\u6a21\u578b\u7684\u6559\u7a0b\u3002 \u5206\u4e3a\u4e09\u4e2a\u90e8\u5206\uff1a - \u6a21\u578b\u5b9a\u4e49\uff1a\u6211\u4eec\u53ef\u4ee5\u76f4\u63a5\u5b9a\u4e49\u4e00\u4e2a\u7f51\u7edc\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528yaml\u6587\u4ef6\u65b9\u5f0f\u5b9a\u4e49\u4e00\u4e2a\u7f51\u7edc\u3002 - \u6ce8\u518c\u6a21\u578b\uff1a\u53ef\u9009\uff0c\u6ce8\u518c\u4e4b\u540e\u53ef\u4ee5\u5728create_model\u63a5\u53e3\u4e2d\u4f7f\u7528\u6587\u4ef6\u540d\u521b\u5efa\u81ea\u5b9a\u4e49\u7684\u6a21\u578b - \u9a8c\u8bc1: \u9a8c\u8bc1\u6a21\u578b\u662f\u5426\u53ef\u8fd0\u884c

"},{"location":"zh/how_to_guides/write_a_new_model/#_2","title":"\u6a21\u578b\u5b9a\u4e49","text":""},{"location":"zh/how_to_guides/write_a_new_model/#1python","title":"1.\u76f4\u63a5\u4f7f\u7528python\u4ee3\u7801\u6765\u7f16\u5199\u7f51\u7edc","text":""},{"location":"zh/how_to_guides/write_a_new_model/#_3","title":"\u6a21\u5757\u5bfc\u5165","text":"

\u5bfc\u5165MindSpore\u6846\u67b6\u4e2d\u7684nn\u6a21\u5757\u548cops\u6a21\u5757\uff0c\u7528\u4e8e\u5b9a\u4e49\u795e\u7ecf\u7f51\u7edc\u7684\u7ec4\u4ef6\u548c\u64cd\u4f5c\u3002

import mindspore.nn as nn\nimport mindspore.ops.operations as ops\n

"},{"location":"zh/how_to_guides/write_a_new_model/#_4","title":"\u521b\u5efa\u6a21\u578b","text":"

\u5b9a\u4e49\u4e86\u4e00\u4e2a\u7ee7\u627f\u81eann.Cell\u7684\u6a21\u578b\u7c7bMyModel\u3002\u5728\u6784\u9020\u51fd\u6570__init__\u4e2d\uff0c\u5b9a\u4e49\u6a21\u578b\u7684\u5404\u4e2a\u7ec4\u4ef6\uff1a

class MyModel(nn.Cell):\n    def __init__(self):\n        super(MyModel, self).__init__()\n        #conv1\u662f\u4e00\u4e2a2D\u5377\u79ef\u5c42\uff0c\u8f93\u5165\u901a\u9053\u6570\u4e3a3\uff0c\u8f93\u51fa\u901a\u9053\u6570\u4e3a16\uff0c\u5377\u79ef\u6838\u5927\u5c0f\u4e3a3x3\uff0c\u6b65\u957f\u4e3a1\uff0c\u586b\u5145\u4e3a1\u3002\n        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)\n        #relu\u662f\u4e00\u4e2aReLU\u6fc0\u6d3b\u51fd\u6570\u64cd\u4f5c\u3002\n        self.relu = ops.ReLU()\n        #axpool\u662f\u4e00\u4e2a2D\u6700\u5927\u6c60\u5316\u5c42\uff0c\u6c60\u5316\u7a97\u53e3\u5927\u5c0f\u4e3a2x2\uff0c\u6b65\u957f\u4e3a2\u3002\n        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n        #conv2\u662f\u53e6\u4e00\u4e2a2D\u5377\u79ef\u5c42\uff0c\u8f93\u5165\u901a\u9053\u6570\u4e3a16\uff0c\u8f93\u51fa\u901a\u9053\u6570\u4e3a32\uff0c\u5377\u79ef\u6838\u5927\u5c0f\u4e3a3x3\uff0c\u6b65\u957f\u4e3a1\uff0c\u586b\u5145\u4e3a1\u3002\n        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)\n        #fc\u662f\u4e00\u4e2a\u5168\u8fde\u63a5\u5c42\uff0c\u8f93\u5165\u7279\u5f81\u7ef4\u5ea6\u4e3a32x8x8\uff0c\u8f93\u51fa\u7279\u5f81\u7ef4\u5ea6\u4e3a10\u3002\n        self.fc = nn.Dense(32 * 8 * 8, 10)\n\n    #\u5728construct\u65b9\u6cd5\u4e2d\uff0c\u5b9a\u4e49\u4e86\u6a21\u578b\u7684\u524d\u5411\u4f20\u64ad\u8fc7\u7a0b\u3002\u8f93\u5165x\u7ecf\u8fc7\u5377\u79ef\u3001\u6fc0\u6d3b\u51fd\u6570\u3001\u6c60\u5316\u7b49\u64cd\u4f5c\u540e\uff0c\u901a\u8fc7\u5c55\u5e73\u64cd\u4f5c\u5c06\u7279\u5f81\u5f20\u91cf\u53d8\u4e3a\u4e00\u7ef4\u5411\u91cf\uff0c\u7136\u540e\u901a\u8fc7\u5168\u8fde\u63a5\u5c42\u5f97\u5230\u6700\u7ec8\u7684\u8f93\u51fa\u7ed3\u679c\u3002    \n    def construct(self, x): \n        x = self.conv1(x)\n        x = self.relu(x)\n        x = self.maxpool(x)\n        x = self.conv2(x)\n        x = self.relu(x)\n        x = self.maxpool(x)\n        x = x.view(x.shape[0], -1)\n        x = self.fc(x)\n        return x\n
"},{"location":"zh/how_to_guides/write_a_new_model/#_5","title":"\u521b\u5efa\u6a21\u578b\u5b9e\u4f8b","text":"

\u901a\u8fc7\u5b9e\u4f8b\u5316MyModel\u7c7b\uff0c\u521b\u5efa\u4e00\u4e2a\u6a21\u578b\u5b9e\u4f8bmodel\uff0c\u540e\u7eed\u53ef\u4ee5\u4f7f\u7528\u8be5\u5b9e\u4f8b\u8fdb\u884c\u6a21\u578b\u7684\u8bad\u7ec3\u548c\u63a8\u7406\u3002

model = MyModel()\n

"},{"location":"zh/how_to_guides/write_a_new_model/#2yaml","title":"2.\u4f7f\u7528yaml\u6587\u4ef6\u7f16\u5199\u7f51\u7edc","text":"

\u901a\u5e38\u9700\u8981\u4ee5\u4e0b\u4e09\u4e2a\u6b65\u9aa4 - \u65b0\u5efa\u4e00\u4e2amymodel.yaml\u6587\u4ef6 - \u65b0\u5efa\u5bf9\u5e94\u7684mymodel.py\u6587\u4ef6 - \u5728mindyolo/models/init.py\u6587\u4ef6\u4e2d\u5f15\u5165\u8be5\u6a21\u578b

\u4ee5\u4e0b\u662f\u7f16\u5199mymodel.yaml\u6587\u4ef6\u7684\u8be6\u7ec6\u6307\u5bfc: \u4ee5\u7f16\u5199\u4e00\u4e2a\u7b80\u5355\u7f51\u7edc\u4e3a\u4f8b\uff1a \u4ee5yaml\u683c\u5f0f\u7f16\u5199\u5fc5\u8981\u53c2\u6570\uff0c\u540e\u7eed\u5728mymodel.py\u6587\u4ef6\u91cc\u9762\u53ef\u4ee5\u7528\u5230\u8fd9\u4e9b\u53c2\u6570\u3002 \u5176\u4e2dnetwork\u90e8\u5206\u4e3a\u6a21\u578b\u7f51\u7edc [[from, number, module, args], ...]\uff1a\u6bcf\u4e2a\u5143\u7d20\u4ee3\u8868\u4e00\u4e2a\u7f51\u7edc\u5c42\u7684\u914d\u7f6e\u3002

# __BASE__\u4e2d\u7684yaml\u8868\u793a\u7528\u4e8e\u7ee7\u627f\u7684\u57fa\u7840\u914d\u7f6e\u6587\u4ef6\uff0c\u91cd\u590d\u7684\u53c2\u6570\u4f1a\u88ab\u5f53\u524d\u6587\u4ef6\u8986\u76d6\uff1b\n__BASE__:\n- '../coco.yaml'\n- './hyp.scratch-high.yaml'\n\nper_batch_size: 32\nimg_size: 640\nsync_bn: False\n\nnetwork:\nmodel_name: mymodel\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nstride: [ 8, 16, 32 ]\n\n# \u9aa8\u5e72\u7f51\u7edc\u90e8\u5206\u7684\u914d\u7f6e\uff0c\u6bcf\u5c42\u7684\u5143\u7d20\u542b\u4e49\u4e3a\n# [from, number, module, args]\n# \u4ee5\u7b2c\u4e00\u5c42\u4e3a\u4f8b\uff0c[-1, 1, ConvNormAct, [32, 3, 1]], \u8868\u793a\u8f93\u5165\u6765\u81ea `-1`(\u4e0a\u4e00\u5c42) \uff0c\u91cd\u590d\u6b21\u6570\u4e3a 1\uff0c\u6a21\u5757\u540d\u4e3a ConvNormAct\uff0c\u6a21\u5757\u8f93\u5165\u53c2\u6570\u4e3a [32, 3, 1]\uff1b\nbackbone: [[-1, 1, ConvNormAct, [32, 3, 1]],  # 0\n[-1, 1, ConvNormAct, [64, 3, 2]],  # 1-P1/2\n[-1, 1, Bottleneck, [64]],\n[-1, 1, ConvNormAct, [128, 3, 2]],  # 3-P2/4\n[-1, 2, Bottleneck, [128]],\n[-1, 1, ConvNormAct, [256, 3, 2]],  # 5-P3/8\n[-1, 8, Bottleneck, [256]],\n]\n\n#head\u90e8\u5206\u7684\u914d\u7f6e \nhead: [\n[ -1, 1, ConvNormAct, [ 512, 3, 2 ] ],  # 7-P4/16\n[ -1, 8, Bottleneck, [ 512 ] ],\n[ -1, 1, ConvNormAct, [ 1024, 3, 2 ] ],  # 9-P5/32\n[ -1, 4, Bottleneck, [ 1024 ] ],  # 10\n]\n

\u7f16\u5199mymodel.py\u6587\u4ef6:

"},{"location":"zh/how_to_guides/write_a_new_model/#_6","title":"\u6a21\u5757\u5bfc\u5165","text":"

\u9700\u8981\u5bfc\u5165\u5957\u4ef6\u5185\u7684\u6a21\u5757\u3002 \u5982from .registry import register_model\u7b49\u7b49

import numpy as np\n\nimport mindspore as ms\nfrom mindspore import Tensor, nn\n\n\nfrom .initializer import initialize_defult #\u7528\u4e8e\u521d\u59cb\u5316\u6a21\u578b\u7684\u9ed8\u8ba4\u53c2\u6570\uff0c\u5305\u62ec\u6743\u91cd\u521d\u59cb\u5316\u65b9\u5f0f\u3001BN \u5c42\u53c2\u6570\u7b49\u3002\nfrom .model_factory import build_model_from_cfg #\u7528\u4e8e\u6839\u636e YAML \u914d\u7f6e\u6587\u4ef6\u4e2d\u7684\u53c2\u6570\u6784\u5efa\u76ee\u6807\u68c0\u6d4b\u6a21\u578b\uff0c\u5e76\u8fd4\u56de\u8be5\u6a21\u578b\u7684\u5b9e\u4f8b\u3002\nfrom .registry import register_model #\u7528\u4e8e\u5c06\u81ea\u5b9a\u4e49\u7684\u6a21\u578b\u6ce8\u518c\u5230 Mindyolo \u4e2d\uff0c\u4ee5\u4fbf\u5728 YAML \u914d\u7f6e\u6587\u4ef6\u4e2d\u4f7f\u7528\u3002\n\n#\u53ef\u89c1\u6027\u58f0\u660e\n__all__ = [\"MYmodel\", \"mymodel\"]\n
"},{"location":"zh/how_to_guides/write_a_new_model/#_7","title":"\u521b\u5efa\u914d\u7f6e\u5b57\u5178","text":"

_cfg\u51fd\u6570\u662f\u4e00\u4e2a\u8f85\u52a9\u51fd\u6570\uff0c\u7528\u4e8e\u521b\u5efa\u914d\u7f6e\u5b57\u5178\u3002\u5b83\u63a5\u53d7\u4e00\u4e2aurl\u53c2\u6570\u548c\u5176\u4ed6\u5173\u952e\u5b57\u53c2\u6570\uff0c\u5e76\u8fd4\u56de\u4e00\u4e2a\u5305\u542burl\u548c\u5176\u4ed6\u53c2\u6570\u7684\u5b57\u5178\u3002 default_cfgs\u662f\u4e00\u4e2a\u5b57\u5178\uff0c\u7528\u4e8e\u5b58\u50a8\u9ed8\u8ba4\u914d\u7f6e\u3002\u5728\u8fd9\u91cc\uff0cmymodel\u4f5c\u4e3a\u952e\uff0c\u4f7f\u7528_cfg\u51fd\u6570\u521b\u5efa\u4e86\u4e00\u4e2a\u914d\u7f6e\u5b57\u5178\u3002

def _cfg(url=\"\", **kwargs):\n    return {\"url\": url, **kwargs}\n\ndefault_cfgs = {\"mymodel\": _cfg(url=\"\")}\n

"},{"location":"zh/how_to_guides/write_a_new_model/#_8","title":"\u521b\u5efa\u6a21\u578b","text":"

\u5728MindSpore\u4e2d\uff0c\u6a21\u578b\u7684\u7c7b\u7ee7\u627f\u4e8enn.Cell\uff0c\u4e00\u822c\u6765\u8bf4\u9700\u8981\u91cd\u8f7d\u4ee5\u4e0b\u4e24\u4e2a\u51fd\u6570\uff1a

  • \u5728__init__\u51fd\u6570\u4e2d\uff0c\u5e94\u5f53\u5b9a\u4e49\u6a21\u578b\u4e2d\u9700\u8981\u7528\u5230\u7684module\u5c42\u3002
  • \u5728construct\u51fd\u6570\u4e2d\u5b9a\u4e49\u6a21\u578b\u524d\u5411\u903b\u8f91\u3002
class MYmodel(nn.Cell):\n\n    def __init__(self, cfg, in_channels=3, num_classes=None, sync_bn=False):\n        super(MYmodel, self).__init__()\n        self.cfg = cfg\n        self.stride = Tensor(np.array(cfg.stride), ms.int32)\n        self.stride_max = int(max(self.cfg.stride))\n        ch, nc = in_channels, num_classes\n\n        self.nc = nc  # override yaml value\n        self.model = build_model_from_cfg(model_cfg=cfg, in_channels=ch, num_classes=nc, sync_bn=sync_bn)\n        self.names = [str(i) for i in range(nc)]  # default names\n\n        initialize_defult()  # \u53ef\u9009\uff0c\u4f60\u53ef\u80fd\u9700\u8981initialize_defult\u65b9\u6cd5\u4ee5\u83b7\u5f97\u548cpytorch\u4e00\u6837\u7684conv2d\u3001dense\u5c42\u7684\u521d\u59cb\u5316\u65b9\u5f0f\uff1b\n\n    def construct(self, x):\n        return self.model(x)\n
"},{"location":"zh/how_to_guides/write_a_new_model/#_9","title":"\u6ce8\u518c\u6a21\u578b\uff08\u53ef\u9009\uff09","text":"

\u5982\u679c\u9700\u8981\u4f7f\u7528mindyolo\u63a5\u53e3\u521d\u59cb\u5316\u81ea\u5b9a\u4e49\u7684\u6a21\u578b\uff0c\u90a3\u4e48\u9700\u8981\u5148\u5bf9\u6a21\u578b\u8fdb\u884c**\u6ce8\u518c**\u548c**\u5bfc\u5165**

\u6a21\u578b\u6ce8\u518c

@register_model #\u6ce8\u518c\u540e\u7684\u6a21\u578b\u53ef\u4ee5\u901a\u8fc7 create_model \u63a5\u53e3\u4ee5\u6a21\u578b\u540d\u7684\u65b9\u5f0f\u8fdb\u884c\u8bbf\u95ee\uff1b\ndef mymodel(cfg, in_channels=3, num_classes=None, **kwargs) -> MYmodel:\n\"\"\"Get GoogLeNet model.\n    Refer to the base class `models.GoogLeNet` for more details.\"\"\"\n    model = MYmodel(cfg=cfg, in_channels=in_channels, num_classes=num_classes, **kwargs)\n    return model\n
\u6a21\u578b\u5bfc\u5165

#\u5728mindyolo/models/_init_.py\u6587\u4ef6\u4e2d\u6dfb\u52a0\u4ee5\u4e0b\u4ee3\u7801\n\nfrom . import mymodel #mymodel.py\u6587\u4ef6\u901a\u5e38\u653e\u5728mindyolo/models/\u76ee\u5f55\u4e0b\n__all__.extend(mymodel.__all__)\nfrom .mymodel import *\n
"},{"location":"zh/how_to_guides/write_a_new_model/#main","title":"\u9a8c\u8bc1main","text":"

\u521d\u59cb\u7f16\u5199\u9636\u6bb5\u5e94\u5f53\u4fdd\u8bc1\u6a21\u578b\u662f\u53ef\u8fd0\u884c\u7684\u3002\u53ef\u901a\u8fc7\u4e0b\u8ff0\u4ee3\u7801\u5757\u8fdb\u884c\u57fa\u7840\u9a8c\u8bc1\uff1a \u9996\u5148\u5bfc\u5165\u6240\u9700\u7684\u6a21\u5757\u548c\u51fd\u6570\u3002\u7136\u540e\uff0c\u901a\u8fc7\u89e3\u6790\u914d\u7f6e\u5bf9\u8c61\u3002

if __name__ == \"__main__\":\n    from mindyolo.models.model_factory import create_model\n    from mindyolo.utils.config import parse_config\n\n    opt = parse_config()\n
\u521b\u5efa\u6a21\u578b\u5e76\u6307\u5b9a\u76f8\u5173\u53c2\u6570\uff0c\u6ce8\u610f\uff1a\u5982\u679c\u8981\u5728create_model\u4e2d\u4f7f\u7528\u6587\u4ef6\u540d\u521b\u5efa\u81ea\u5b9a\u4e49\u7684\u6a21\u578b\uff0c\u90a3\u4e48\u9700\u8981\u5148\u4f7f\u7528\u6ce8\u518c\u5668@register_model\u8fdb\u884c\u6ce8\u518c\uff0c\u8bf7\u53c2\u89c1\u4e0a\u6587 \u6ce8\u518c\u6a21\u578b\uff08\u53ef\u9009)\u90e8\u5206\u5185\u5bb9
    model = create_model(\n        model_name=\"mymodel\",\n        model_cfg=opt.net,\n        num_classes=opt.data.nc,\n        sync_bn=opt.sync_bn if hasattr(opt, \"sync_bn\") else False,\n    ) \n

\u5426\u5219\uff0c\u8bf7\u4f7f\u7528import\u7684\u65b9\u5f0f\u5f15\u5165\u6a21\u578b

    from mindyolo.models.mymodel import MYmodel\n    model = MYmodel(\n        model_name=\"mymodel\",\n        model_cfg=opt.net,\n        num_classes=opt.data.nc,\n        sync_bn=opt.sync_bn if hasattr(opt, \"sync_bn\") else False,\n    ) \n
\u6700\u540e\uff0c\u521b\u5efa\u4e00\u4e2a\u8f93\u5165\u5f20\u91cfx\u5e76\u5c06\u5176\u4f20\u9012\u7ed9\u6a21\u578b\u8fdb\u884c\u524d\u5411\u8ba1\u7b97\u3002
    x = Tensor(np.random.randn(1, 3, 640, 640), ms.float32)\n    out = model(x)\n    out = out[0] if isinstance(out, (list, tuple)) else out\n    print(f\"Output shape is {[o.shape for o in out]}\")\n

"},{"location":"zh/notes/changelog/","title":"\u66f4\u65b0\u65e5\u5fd7","text":"

\u5373\u5c06\u5230\u6765

"},{"location":"zh/notes/code_of_conduct/","title":"\u884c\u4e3a\u51c6\u5219","text":"

\u5373\u5c06\u5230\u6765

"},{"location":"zh/notes/faq/","title":"\u5e38\u89c1\u95ee\u9898","text":"

\u5373\u5c06\u5230\u6765

"},{"location":"zh/tutorials/configuration/","title":"\u914d\u7f6e","text":""},{"location":"zh/tutorials/deployment/","title":"\u90e8\u7f72","text":""},{"location":"zh/tutorials/finetune/","title":"\u5fae\u8c03","text":""},{"location":"zh/tutorials/modelarts/","title":"\u4e91\u4e0a\u542f\u52a8","text":""},{"location":"zh/tutorials/quick_start/#mindyolo","title":"MindYOLO \u5feb\u901f\u5165\u95e8","text":"

\u672c\u6587\u7b80\u8981\u4ecb\u7ecdMindYOLO\u4e2d\u5185\u7f6e\u7684\u547d\u4ee4\u884c\u5de5\u5177\u7684\u4f7f\u7528\u65b9\u6cd5\u3002

"},{"location":"zh/tutorials/quick_start/#_2","title":"\u4f7f\u7528\u9884\u8bad\u7ec3\u6a21\u578b\u8fdb\u884c\u63a8\u7406","text":"
  1. \u4ecemodel zoo\u4e2d\u9009\u62e9\u4e00\u4e2a\u6a21\u578b\u53ca\u5176\u914d\u7f6e\u6587\u4ef6\uff0c\u4f8b\u5982\uff0c ./configs/yolov7/yolov7.yaml.
  2. \u4ecemodel zoo\u4e2d\u4e0b\u8f7d\u76f8\u5e94\u7684\u9884\u8bad\u7ec3\u6a21\u578b\u6743\u91cd\u6587\u4ef6\u3002
  3. \u4f7f\u7528\u5185\u7f6e\u914d\u7f6e\u8fdb\u884c\u63a8\u7406\uff0c\u8bf7\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a
# NPU (\u9ed8\u8ba4)\npython demo/predict.py --config ./configs/yolov7/yolov7.yaml --weight=/path_to_ckpt/WEIGHT.ckpt --image_path /path_to_image/IMAGE.jpg\n\n# GPU\npython demo/predict.py --config ./configs/yolov7/yolov7.yaml --weight=/path_to_ckpt/WEIGHT.ckpt --image_path /path_to_image/IMAGE.jpg --device_target=GPU\n

\u6709\u5173\u547d\u4ee4\u884c\u53c2\u6570\u7684\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605demo/predict.py -h\uff0c\u6216\u67e5\u770b\u5176\u6e90\u4ee3\u7801\u3002

  • \u8981\u5728CPU\u4e0a\u8fd0\u884c\uff0c\u8bf7\u5c06device_target\u7684\u503c\u4fee\u6539\u4e3aCPU.
  • \u7ed3\u679c\u5c06\u4fdd\u5b58\u5728./detect_results\u76ee\u5f55\u4e0b
"},{"location":"zh/tutorials/quick_start/#_3","title":"\u4f7f\u7528\u547d\u4ee4\u884c\u8fdb\u884c\u8bad\u7ec3\u548c\u8bc4\u4f30","text":"
  • \u6309\u7167YOLO\u683c\u5f0f\u51c6\u5907\u60a8\u7684\u6570\u636e\u96c6\u3002\u5982\u679c\u4f7f\u7528COCO\u6570\u636e\u96c6\uff08YOLO\u683c\u5f0f\uff09\u8fdb\u884c\u8bad\u7ec3\uff0c\u8bf7\u4eceyolov5\u6216darknet\u51c6\u5907\u6570\u636e\u96c6.
  coco/\n    {train,val}2017.txt\n    annotations/\n      instances_{train,val}2017.json\n    images/\n      {train,val}2017/\n          00000001.jpg\n          ...\n          # image files that are mentioned in the corresponding train/val2017.txt\n    labels/\n      {train,val}2017/\n          00000001.txt\n          ...\n          # label files that are mentioned in the corresponding train/val2017.txt\n
  • \u5728\u591a\u5361NPU/GPU\u4e0a\u8fdb\u884c\u5206\u5e03\u5f0f\u6a21\u578b\u8bad\u7ec3\uff0c\u4ee58\u5361\u4e3a\u4f8b:
mpirun --allow-run-as-root -n 8 python train.py --config ./configs/yolov7/yolov7.yaml  --is_parallel True\n
  • \u5728\u5355\u5361NPU/GPU/CPU\u4e0a\u8bad\u7ec3\u6a21\u578b\uff1a
python train.py --config ./configs/yolov7/yolov7.yaml 
  • \u8bc4\u4f30\u6a21\u578b\u7684\u7cbe\u5ea6\uff1a

python test.py --config ./configs/yolov7/yolov7.yaml --weight /path_to_ckpt/WEIGHT.ckpt\n
\u6ce8\u610f\uff1a\u9ed8\u8ba4\u8d85\u53c2\u4e3a8\u5361\u8bad\u7ec3\uff0c\u5355\u5361\u60c5\u51b5\u9700\u8c03\u6574\u90e8\u5206\u53c2\u6570\u3002 \u9ed8\u8ba4\u8bbe\u5907\u4e3aAscend\uff0c\u60a8\u53ef\u4ee5\u6307\u5b9a'device_target'\u7684\u503c\u4e3aAscend/GPU/CPU\u3002 * \u6709\u5173\u66f4\u591a\u9009\u9879\uff0c\u8bf7\u53c2\u9605 train/test.py -h. * \u5728\u4e91\u8111\u4e0a\u8fdb\u884c\u8bad\u7ec3\uff0c\u8bf7\u5728\u8fd9\u91cc\u67e5\u770b

"},{"location":"zh/tutorials/quick_start/#_4","title":"\u90e8\u7f72","text":"

\u8bf7\u5728\u8fd9\u91cc\u67e5\u770b.

"},{"location":"zh/tutorials/quick_start/#mindyolo-api","title":"\u5728\u4ee3\u7801\u4e2d\u4f7f\u7528MindYOLO API","text":"

\u656c\u8bf7\u671f\u5f85

"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..718e7f26 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,131 @@ + + + + https://mindspore-lab.github.io/mindyolo/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/installation/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/modelzoo/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/how_to_guides/write_a_new_model/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/notes/changelog/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/notes/code_of_conduct/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/notes/contributing/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/notes/faq/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/reference/data/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/reference/loss/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/reference/models/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/tutorials/configuration/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/tutorials/deployment/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/tutorials/finetune/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/tutorials/modelarts/ + 2023-07-17 + daily + + + + + + https://mindspore-lab.github.io/mindyolo/tutorials/quick_start/ + 2023-07-17 + daily + + + + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 0000000000000000000000000000000000000000..b93bc4fdb2627477771fb94b92ee7675400ea954 GIT binary patch literal 584 zcmV-O0=NAiiwFp4_OxUI|8r?{Wo=<_E_iKh0Nt3)lAeWNOedwxeTpOKQUMi+Tyg;{1mW3LT;UW_aLDD+eglwKcF;|V(l)xx$A(4WXwdm 
zc7Ol+Is4(h1y6pVxCV60WTDq?hPYi3QkJEs8A49C+LJ0JI?Bv}CZ|C%_m*x|v--#| z=NFabi>+BKmrG*Ly1|oUwj-#bBU<8XOIcMli&TV3=Vlr8VD8sa1F=$dMa}y0|D4+~2e9XT77p*sq4=tos zyF#KNR-NZE9kb2rD81Rc3+}>}xh6uHgpm|+5#a=!T?Ojf+&z3J{sbzvDbT#lPC;HR zP4DKK*d1s~=x+SBjv;!M^ah`m?9mspsOUkS-QvVNEMe_PeYYWGJvYj z+>TV)7to2wI%OF^*DZ3Z!mvQ880Gevn6R`^qyM|3DN=9xzO;ajEXJ_qa-W01X(+7a z8Ngo?y(4)E6M%()X{bA=89>}^c1POmO`0my)A05$&H&;X;2oLU6)buWH5%YF+|>gO WV6PEh_h)w+F!&3&k9U%;8vp>~h92|) literal 0 HcmV?d00001 diff --git a/tutorials/configuration/index.html b/tutorials/configuration/index.html new file mode 100644 index 00000000..12167ff6 --- /dev/null +++ b/tutorials/configuration/index.html @@ -0,0 +1,1145 @@ + + + + + + + + + + + + + + + + + + + + + + + + Configuration - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Configuration

+ +

配置

+

MindYOLO套件同时支持yaml文件参数和命令行参数解析,并将相对固定、与模型强相关、较为复杂或者含有嵌套结构的参数编写成yaml文件,需根据实际应用场景更改或者较为简单的参数则通过命令行传入。

+

下面以yolov3为例,解释如何配置相应的参数。

+

参数继承关系

+

参数优先级由高到低如下,出现同名参数时,低优先级参数会被高优先级参数覆盖

+
    +
  • 用户命令行传入参数
  • +
  • python执行py文件中parser的默认参数
  • +
  • 命令行传入config参数对应的yaml文件参数
  • +
  • 命令行传入config参数对应的yaml文件中__BASE__参数中包含的yaml文件参数,例如yolov3.yaml含有如下参数: +
    __BASE__: [
    +  '../coco.yaml',
    +  './hyp.scratch.yaml',
    +]
    +
  • +
+

基础参数

+

参数说明

+
    +
  • device_target: 所用设备,Ascend/GPU/CPU
  • +
  • save_dir: 运行结果保存路径,默认为./runs
  • +
  • log_interval: 打印日志step间隔,默认为100
  • +
  • is_parallel: 是否分布式训练,默认为False
  • +
  • ms_mode: 使用静态图模式(0)或动态图模式(1),默认为0。
  • +
  • config: yaml配置文件路径
  • +
  • per_batch_size: 每张卡batch size,默认为32
  • +
  • epochs: 训练epoch数,默认为300
  • +
  • ...
  • +
+

parse参数设置

+

该部分参数通常由命令行传入,示例如下:

+
mpirun --allow-run-as-root -n 8 python train.py --config ./configs/yolov7/yolov7.yaml  --is_parallel True --log_interval 50
+
+

数据集

+

参数说明

+
    +
  • dataset_name: 数据集名称
  • +
  • train_set: 训练集所在路径
  • +
  • val_set: 验证集所在路径
  • +
  • test_set: 测试集所在路径
  • +
  • nc: 数据集类别数
  • +
  • names: 类别名称
  • +
  • ...
  • +
+

yaml文件样例

+

该部分参数在configs/coco.yaml中定义,通常需修改其中的数据集路径

+

```yaml +data: + dataset_name: coco

+

train_set: ./coco/train2017.txt # 118287 images + val_set: ./coco/val2017.txt # 5000 images + test_set: ./coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

+

nc: 80

+

# class names + names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush' ] + ```

+

数据增强

+

参数说明

+
    +
  • num_parallel_workers: 读取数据的工作进程数
  • +
  • train_transformers: 训练过程数据增强
  • +
  • test_transformers: 验证过程数据增强
  • +
  • ...
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/hyp.scratch.yaml中定义,其中train_transformers和test_transformers均为由字典组成的列表,各字典包含数据增强操作名称、发生概率及该增强方法相关的参数

+

```yaml +data: + num_parallel_workers: 4

+

train_transforms: + - { func_name: mosaic, prob: 1.0, mosaic9_prob: 0.0, translate: 0.1, scale: 0.9 } + - { func_name: mixup, prob: 0.1, alpha: 8.0, beta: 8.0, needed_mosaic: True } + - { func_name: hsv_augment, prob: 1.0, hgain: 0.015, sgain: 0.7, vgain: 0.4 } + - { func_name: label_norm, xyxy2xywh_: True } + - { func_name: albumentations } + - { func_name: fliplr, prob: 0.5 } + - { func_name: label_pad, padding_size: 160, padding_value: -1 } + - { func_name: image_norm, scale: 255. } + - { func_name: image_transpose, bgr2rgb: True, hwc2chw: True }

+

test_transforms: + - { func_name: letterbox, scaleup: False } + - { func_name: label_norm, xyxy2xywh_: True } + - { func_name: label_pad, padding_size: 160, padding_value: -1 } + - { func_name: image_norm, scale: 255. } + - { func_name: image_transpose, bgr2rgb: True, hwc2chw: True } + ```

+

模型

+

参数说明

+
    +
  • model_name: 模型名称
  • +
  • depth_multiple: 模型深度因子
  • +
  • width_multiple: 模型宽度因子
  • +
  • stride: 特征图下采样倍数
  • +
  • anchors: 预设锚框
  • +
  • backbone: 模型骨干网络
  • +
  • head: 模型检测头
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/yolov3.yaml中定义,根据backbon和head参数进行网络构建,参数以嵌套列表的形式呈现,每行代表一层模块,包含4个参数,分别是 输入层编号(-1代表上一层)、模块重复次数、模块名称和模块相应参数。用户也可以不借助yaml文件而直接在py文件中定义和注册网络。 +```yaml +network: + model_name: yolov3

+

depth_multiple: 1.0 # model depth multiple + width_multiple: 1.0 # layer channel multiple + stride: [8, 16, 32] + anchors: + - [10,13, 16,30, 33,23] # P⅜ + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32

+

# darknet53 backbone + backbone: + # [from, number, module, args] + [[-1, 1, ConvNormAct, [32, 3, 1]], # 0 + [-1, 1, ConvNormAct, [64, 3, 2]], # 1-P½ + [-1, 1, Bottleneck, [64]], + [-1, 1, ConvNormAct, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, ConvNormAct, [256, 3, 2]], # 5-P⅜ + [-1, 8, Bottleneck, [256]], + [-1, 1, ConvNormAct, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, ConvNormAct, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ]

+

# YOLOv3 head + head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, ConvNormAct, [512, 1, 1]], + [-1, 1, ConvNormAct, [1024, 3, 1]], + [-1, 1, ConvNormAct, [512, 1, 1]], + [-1, 1, ConvNormAct, [1024, 3, 1]], # 15 (P5/32-large)

+
 [-2, 1, ConvNormAct, [256, 1, 1]],
+ [-1, 1, Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, ConvNormAct, [256, 1, 1]],
+ [-1, 1, ConvNormAct, [512, 3, 1]],  # 22 (P4/16-medium)
+
+ [-2, 1, ConvNormAct, [128, 1, 1]],
+ [-1, 1, Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]],  # cat backbone P3
+ [-1, 1, Bottleneck, [256, False]],
+ [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
+
+ [[27, 22, 15], 1, YOLOv3Head, [nc, anchors, stride]],   # Detect(P3, P4, P5)
+]
+
+

```

+

损失函数

+

参数说明

+
    +
  • name: 损失函数名称
  • +
  • box: box损失权重
  • +
  • cls: class损失权重
  • +
  • cls_pw: class损失正样本权重
  • +
  • obj: object损失权重
  • +
  • obj_pw: object损失正样本权重
  • +
  • fl_gamma: focal loss gamma
  • +
  • anchor_t: anchor shape比例阈值
  • +
  • label_smoothing: 标签平滑值
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/hyp.scratch.yaml中定义

+

yaml +loss: + name: YOLOv7Loss + box: 0.05 # box loss gain + cls: 0.5 # cls loss gain + cls_pw: 1.0 # cls BCELoss positive_weight + obj: 1.0 # obj loss gain (scale with pixels) + obj_pw: 1.0 # obj BCELoss positive_weight + fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) + anchor_t: 4.0 # anchor-multiple threshold + label_smoothing: 0.0 # label smoothing epsilon

+

优化器

+

参数说明

+
    +
  • optimizer: 优化器名称。
  • +
  • lr_init: 学习率初始值
  • +
  • warmup_epochs: warmup epoch数
  • +
  • warmup_momentum: warmup momentum初始值
  • +
  • warmup_bias_lr: warmup bias学习率初始值
  • +
  • min_warmup_step: 最小warmup step数
  • +
  • group_param: 参数分组策略
  • +
  • gp_weight_decay: 分组参数权重衰减系数
  • +
  • start_factor: 初始学习率因数
  • +
  • end_factor: 结束学习率因数
  • +
  • momentum:移动平均的动量
  • +
  • loss_scale:loss缩放系数
  • +
  • nesterov:是否使用Nesterov Accelerated Gradient (NAG)算法更新梯度。
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/hyp.scratch.yaml中定义,如下示例中经过warmup阶段后的初始学习率为lr_init * start_factor = 0.01 * 1.0 = 0.01, 最终学习率为lr_init * end_factor = 0.01 * 0.01 = 0.0001

+

yaml +optimizer: + optimizer: momentum + lr_init: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) + momentum: 0.937 # SGD momentum/Adam beta1 + nesterov: True # update gradients with NAG(Nesterov Accelerated Gradient) algorithm + loss_scale: 1.0 # loss scale for optimizer + warmup_epochs: 3 # warmup epochs (fractions ok) + warmup_momentum: 0.8 # warmup initial momentum + warmup_bias_lr: 0.1 # warmup initial bias lr + min_warmup_step: 1000 # minimum warmup step + group_param: yolov7 # group param strategy + gp_weight_decay: 0.0005 # group param weight decay 5e-4 + start_factor: 1.0 + end_factor: 0.01

+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorials/deployment/index.html b/tutorials/deployment/index.html new file mode 100644 index 00000000..597c988e --- /dev/null +++ b/tutorials/deployment/index.html @@ -0,0 +1,1277 @@ + + + + + + + + + + + + + + + + + + + + + + + + Deployment - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Deployment

+ +

MindYOLO部署

+

依赖

+
pip install -r requirement.txt
+
+

MindSpore Lite环境准备

+

参考:Lite环境配置
+ 注意:MindSpore Lite适配的python环境为3.7,请在安装Lite前准备好python3.7的环境
+ 1. 根据环境,下载配套的tar.gz包和whl包 + 2. 解压tar.gz包并安装对应版本的whl包 +

tar -zxvf mindspore_lite-2.0.0a0-cp37-cp37m-{os}_{platform}_64.tar.gz
+pip install mindspore_lite-2.0.0a0-cp37-cp37m-{os}_{platform}_64.whl
+
+ 3. 配置Lite的环境变量 + LITE_HOME为tar.gz解压出的文件夹路径,推荐使用绝对路径 +
export LITE_HOME=/path/to/mindspore-lite-{version}-{os}-{platform}
+export LD_LIBRARY_PATH=$LITE_HOME/runtime/lib:$LITE_HOME/tools/converter/lib:$LD_LIBRARY_PATH
+export PATH=$LITE_HOME/tools/converter/converter:$LITE_HOME/tools/benchmark:$PATH
+

+

快速开始

+

模型转换

+

ckpt模型转为mindir模型,此步骤可在CPU/Ascend910上运行 +

python ./deploy/export.py --config ./path_to_config/model.yaml --weight ./path_to_ckpt/weight.ckpt --per_batch_size 1 --file_format MINDIR --device_target [CPU/Ascend]
+e.g.
+# 在CPU上运行
+python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target CPU
+# 在Ascend上运行
+python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target Ascend
+

+

Lite Test

+
python deploy/test.py --model_type Lite --model_path ./path_to_mindir/weight.mindir --config ./path_to_config/yolo.yaml
+e.g.
+python deploy/test.py --model_type Lite --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml
+
+

Lite Predict

+
python ./deploy/predict.py --model_type Lite --model_path ./path_to_mindir/weight.mindir --config ./path_to_config/yolo.yaml --image_path ./path_to_image/image.jpg
+e.g.
+python deploy/predict.py --model_type Lite --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml --image_path ./coco/image/val2017/image.jpg
+
+

脚本说明

+
    +
  • predict.py 支持单张图片推理
  • +
  • test.py 支持COCO数据集推理
  • +
+

MindX部署

+

查看 MINDX

+

标准和支持的模型库

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameScaleContextImageSizeDatasetBox mAP (%)ParamsFLOPsRecipeDownload
YOLOv8ND310x1-G640MS COCO 201737.23.2M8.7Gyamlckpt
mindir
YOLOv8SD310x1-G640MS COCO 201744.611.2M28.6Gyamlckpt
mindir
YOLOv8MD310x1-G640MS COCO 201750.525.9M78.9Gyamlckpt
mindir
YOLOv8LD310x1-G640MS COCO 201752.843.7M165.2Gyamlckpt
mindir
YOLOv8XD310x1-G640MS COCO 201753.768.2M257.8Gyamlckpt
mindir
YOLOv7TinyD310x1-G640MS COCO 201737.56.2M13.8Gyamlckpt
mindir
YOLOv7LD310x1-G640MS COCO 201750.836.9M104.7Gyamlckpt
mindir
YOLOv7XD310x1-G640MS COCO 201752.471.3M189.9Gyamlckpt
mindir
YOLOv5ND310x1-G640MS COCO 201727.31.9M4.5Gyamlckpt
mindir
YOLOv5SD310x1-G640MS COCO 201737.67.2M16.5Gyamlckpt
mindir
YOLOv5MD310x1-G640MS COCO 201744.921.2M49.0Gyamlckpt
mindir
YOLOv5LD310x1-G640MS COCO 201748.546.5M109.1Gyamlckpt
mindir
YOLOv5XD310x1-G640MS COCO 201750.586.7M205.7Gyamlckpt
mindir
YOLOv4CSPDarknet53D310x1-G608MS COCO 201745.427.6M52Gyamlckpt
mindir
YOLOv4CSPDarknet53(silu)D310x1-G640MS COCO 201745.827.6M52Gyamlckpt
mindir
YOLOv3Darknet53D310x1-G640MS COCO 201745.561.9M156.4Gyamlckpt
mindir
YOLOXND310x1-G416MS COCO 201724.10.9M1.1Gyamlckpt
mindir
YOLOXTinyD310x1-G416MS COCO 201733.35.1M6.5Gyamlckpt
mindir
YOLOXSD310x1-G640MS COCO 201740.79.0M26.8Gyamlckpt
mindir
YOLOXMD310x1-G640MS COCO 201746.725.3M73.8Gyamlckpt
mindir
YOLOXLD310x1-G640MS COCO 201749.254.2M155.6Gyamlckpt
mindir
YOLOXXD310x1-G640MS COCO 201751.699.1M281.9Gyamlckpt
mindir
YOLOXDarknet53D310x1-G640MS COCO 201747.763.7M185.3Gyamlckpt
mindir
+


+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorials/finetune/index.html b/tutorials/finetune/index.html new file mode 100644 index 00000000..21b07e46 --- /dev/null +++ b/tutorials/finetune/index.html @@ -0,0 +1,1080 @@ + + + + + + + + + + + + + + + + + + + + + + + + Finetune - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Finetune

+ +

自定义数据集finetune流程

+

本文以安全帽佩戴检测数据集(SHWD)为例,介绍自定义数据集在MindYOLO上进行finetune的主要流程。

+

数据集格式转换

+

SHWD数据集采用voc格式的数据标注,其文件目录如下所示: +

             ROOT_DIR
+                ├── Annotations
+                │        ├── 000000.xml
+                │        └── 000002.xml
+                ├── ImageSets
+                │       └── Main
+                │             ├── test.txt
+                │             ├── train.txt
+                │             ├── trainval.txt
+                │             └── val.txt
+                └── JPEGImages
+                        ├── 000000.jpg
+                        └── 000002.jpg
+
+其中,ImageSets/Main文件下的txt文件中每行代表相应子集中单张图片不含后缀的文件名,例如: +
000002
+000005
+000019
+000022
+000027
+000034
+

+

由于MindYOLO在验证阶段选用图片名称作为image_id,因此图片名称只能为数值类型,而不能为字符串类型,还需要对图片进行改名。对SHWD数据集格式的转换包含如下步骤: +* 将图片复制到相应的路径下并改名 +* 在根目录下相应的txt文件中写入该图片的相对路径 +* 解析xml文件,在相应路径下生成对应的txt标注文件 +* 验证集还需生成最终的json文件

+

详细实现可参考convert_shwd2yolo.py。运行方式如下:

+
python examples/finetune_SHWD/convert_shwd2yolo.py --root_dir /path_to_shwd/SHWD
+
+

运行以上命令将在不改变原数据集的前提下,在同级目录生成yolo格式的SHWD数据集。

+

预训练模型文件转换

+

由于SHWD数据集只有7000+张图片,选择yolov7-tiny进行该数据集的训练,可下载MindYOLO提供的在coco数据集上训练好的模型文件作为预训练模型。由于coco数据集含有80种物体类别,SHWD数据集只有两类,模型的最后一层head层输出与类别数nc有关,因此需将预训练模型文件的最后一层去掉, 可参考convert_yolov7-tiny_pretrain_ckpt.py。运行方式如下:

+
python examples/finetune_SHWD/convert_yolov7-tiny_pretrain_ckpt.py
+
+

模型微调(Finetune)

+

简要的训练流程可参考finetune_shwd.py

+
    +
  • 在多卡NPU/GPU上进行分布式模型训练,以8卡为例:
  • +
+
mpirun --allow-run-as-root -n 8 python examples/finetune_SHWD/finetune_shwd.py --config ./examples/finetune_SHWD/yolov7-tiny_shwd.yaml --is_parallel True
+
+
    +
  • 在单卡NPU/GPU/CPU上训练模型:
  • +
+
python examples/finetune_SHWD/finetune_shwd.py --config ./examples/finetune_SHWD/yolov7-tiny_shwd.yaml 
+
+

注意:直接用yolov7-tiny默认coco参数在SHWD数据集上训练,可取得AP50 87.0的精度。将lr_init参数由0.01改为0.001,即可实现ap50为89.2的精度结果。

+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorials/modelarts/index.html b/tutorials/modelarts/index.html new file mode 100644 index 00000000..bfd737d2 --- /dev/null +++ b/tutorials/modelarts/index.html @@ -0,0 +1,926 @@ + + + + + + + + + + + + + + + + + + + + + + + + CloudBrain - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorials/quick_start/index.html b/tutorials/quick_start/index.html new file mode 100644 index 00000000..3d5c2471 --- /dev/null +++ b/tutorials/quick_start/index.html @@ -0,0 +1,1108 @@ + + + + + + + + + + + + + + + + + + + + + + + + Quick Start - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Quick Start

+ +

Getting Started with MindYOLO

+

This document provides a brief introduction to the usage of built-in command-line tools in MindYOLO.

+

Inference Demo with Pre-trained Models

+
    +
  1. Pick a model and its config file from the + model zoo, + such as, ./configs/yolov7/yolov7.yaml.
  2. +
  3. Download the corresponding pre-trained checkpoint from the model zoo of each model.
  4. +
  5. To run YOLO object detection with the built-in configs, please run:
  6. +
+
# Run with Ascend (By default)
+python demo/predict.py --config ./configs/yolov7/yolov7.yaml --weight=/path_to_ckpt/WEIGHT.ckpt --image_path /path_to_image/IMAGE.jpg
+
+# Run with GPU
+python demo/predict.py --config ./configs/yolov7/yolov7.yaml --weight=/path_to_ckpt/WEIGHT.ckpt --image_path /path_to_image/IMAGE.jpg --device_target=GPU
+
+

For details of the command line arguments, see demo/predict.py -h or look at its source code +to understand their behavior. Some common arguments are: +* To run on cpu, modify device_target to CPU. +* The results will be saved in ./detect_results

+

Training & Evaluation in Command Line

+
    +
  • Prepare your dataset in YOLO format. If trained with COCO (YOLO format), prepare it from yolov5 or the darknet.
  • +
+
+ +
  coco/
+    {train,val}2017.txt
+    annotations/
+      instances_{train,val}2017.json
+    images/
+      {train,val}2017/
+          00000001.jpg
+          ...
+          # image files that are mentioned in the corresponding train/val2017.txt
+    labels/
+      {train,val}2017/
+          00000001.txt
+          ...
+          # label files that are mentioned in the corresponding train/val2017.txt
+
+
+ +
    +
  • +

    To train a model on 8 NPUs/GPUs: +

    mpirun --allow-run-as-root -n 8 python train.py --config ./configs/yolov7/yolov7.yaml  --is_parallel True
    +

    +
  • +
  • +

    To train a model on 1 NPU/GPU/CPU: +

    python train.py --config ./configs/yolov7/yolov7.yaml 
    +

    +
  • +
  • +

    To evaluate a model's performance: +

    python test.py --config ./configs/yolov7/yolov7.yaml --weight /path_to_ckpt/WEIGHT.ckpt
    +
    +Notes: (1) The default hyper-parameter is used for 8-card training, and some parameters need to be adjusted in the case of a single card. (2) The default device is Ascend, and you can modify it by specifying 'device_target' as Ascend/GPU/CPU, as these are currently supported.

    +
  • +
  • For more options, see train/test.py -h.
  • +
+

Deployment

+

See here.

+

To use MindYOLO APIs in Your Code

+

To be supplemented.

+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/__pycache__/gen_ref_pages.cpython-38.pyc b/zh/__pycache__/gen_ref_pages.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4260e700aec82bb40572a853fce0a3bac798332f GIT binary patch literal 1996 zcmahJO^+Kj)b@N&lKpB~SST=J1yV{$Ky43Q0YX|35`q;ULM`rE~9;e%k zCR~;)qDQ2vQucz>dw-3uTdq0t|$$j2{v&Ead1?LK1;j3`2a`p{rua%cR6`7E+5`h|upqFzINYNKk zWIYj#Z_Su8xm7W4|IC15a_0%yJ%cTH z(>tbfoBJp9n0`!t_+w^N#@ykJ6FM`MTbcVnpIMc~n_Fb=^VSJ1IAAMVgs&bj`RgHK zK;vr%w*2i3TL|dpp>b0<)v0m6zdWJJ{LT2}ViAr+J!pmOr3u)Q@{Am(xzK7K&z{ zP2D(3)G#V0{eE&=n<>}?jImUO_6vn4(zUstCz}KvG0wtX0KBi|$b_54w-zdYTN~Isw6T5qJ$=hwg!q_9DDO zbQNAIITt_-l-htzv%Ju@6r=o>Xfyc+-f!cE z-db#LBbygtVW9D-W?{p&FkW^6>Y*Xaf%$~1Hfj?9MWCTIp%v1K9YY&9G_ln0)4ev+ zc7!o5YS*+4?ey}|D9#|)cp}wwq0cj__)r>kqD{tE;PnC>NT^FCVm(Bb1KarXG$Eh~ zd8~>BVs{`o3Cu!iZ3X&T9Q0EY~p7V=Gm?05|2 z#N`D<94s`5l6EkOp)wW)=naJWye`2-aB5{RVQP(P+a!MTL||&a)X{O@UzEa!XmRlp WY$iCwWlaV + + + + + + + + + + + + + + + + + + + + + + Write A New Model - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

模型编写指南

+

本文档提供MindYOLO编写自定义模型的教程。
+分为三个部分: +- 模型定义:我们可以直接定义一个网络,也可以使用yaml文件方式定义一个网络。 +- 注册模型:可选,注册之后可以在create_model接口中使用文件名创建自定义的模型 +- 验证: 验证模型是否可运行

+

模型定义

+

1.直接使用python代码来编写网络

+

模块导入

+

导入MindSpore框架中的nn模块和ops模块,用于定义神经网络的组件和操作。 +

import mindspore.nn as nn
+import mindspore.ops.operations as ops
+

+

创建模型

+

定义了一个继承自nn.Cell的模型类MyModel。在构造函数__init__中,定义模型的各个组件:

+
class MyModel(nn.Cell):
+    def __init__(self):
+        super(MyModel, self).__init__()
+        #conv1是一个2D卷积层,输入通道数为3,输出通道数为16,卷积核大小为3x3,步长为1,填充为1。
+        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
+        #relu是一个ReLU激活函数操作。
+        self.relu = ops.ReLU()
+        #maxpool是一个2D最大池化层,池化窗口大小为2x2,步长为2。
+        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
+        #conv2是另一个2D卷积层,输入通道数为16,输出通道数为32,卷积核大小为3x3,步长为1,填充为1。
+        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
+        #fc是一个全连接层,输入特征维度为32x8x8,输出特征维度为10。
+        self.fc = nn.Dense(32 * 8 * 8, 10)
+
+    #在construct方法中,定义了模型的前向传播过程。输入x经过卷积、激活函数、池化等操作后,通过展平操作将特征张量变为一维向量,然后通过全连接层得到最终的输出结果。    
+    def construct(self, x): 
+        x = self.conv1(x)
+        x = self.relu(x)
+        x = self.maxpool(x)
+        x = self.conv2(x)
+        x = self.relu(x)
+        x = self.maxpool(x)
+        x = x.view(x.shape[0], -1)
+        x = self.fc(x)
+        return x
+
+

创建模型实例

+

通过实例化MyModel类,创建一个模型实例model,后续可以使用该实例进行模型的训练和推理。 +

model = MyModel()
+

+

2.使用yaml文件编写网络

+

通常需要以下三个步骤 +- 新建一个mymodel.yaml文件 +- 新建对应的mymodel.py文件 +- 在mindyolo/models/__init__.py文件中引入该模型

+

以下是编写mymodel.yaml文件的详细指导:
+以编写一个简单网络为例: +以yaml格式编写必要参数,后续在mymodel.py文件里面可以用到这些参数。 +其中network部分为模型网络
+[[from, number, module, args], ...]:每个元素代表一个网络层的配置。
+

# __BASE__中的yaml表示用于继承的基础配置文件,重复的参数会被当前文件覆盖;
+__BASE__:
+  - '../coco.yaml'
+  - './hyp.scratch-high.yaml'
+
+per_batch_size: 32
+img_size: 640
+sync_bn: False
+
+network:
+  model_name: mymodel
+  depth_multiple: 1.0  # model depth multiple
+  width_multiple: 1.0  # layer channel multiple
+  stride: [ 8, 16, 32 ]
+
+  # 骨干网络部分的配置,每层的元素含义为
+  # [from, number, module, args]
+  # 以第一层为例,[-1, 1, ConvNormAct, [32, 3, 1]], 表示输入来自 `-1`(上一层) ,重复次数为 1,模块名为 ConvNormAct,模块输入参数为 [32, 3, 1];
+  backbone: 
+    [[-1, 1, ConvNormAct, [32, 3, 1]],  # 0
+     [-1, 1, ConvNormAct, [64, 3, 2]],  # 1-P1/2
+     [-1, 1, Bottleneck, [64]],
+     [-1, 1, ConvNormAct, [128, 3, 2]],  # 3-P2/4
+     [-1, 2, Bottleneck, [128]],
+     [-1, 1, ConvNormAct, [256, 3, 2]],  # 5-P3/8
+     [-1, 8, Bottleneck, [256]],
+      ]
+
+  #head部分的配置 
+  head: 
+    [
+    [ -1, 1, ConvNormAct, [ 512, 3, 2 ] ],  # 7-P4/16
+      [ -1, 8, Bottleneck, [ 512 ] ],
+      [ -1, 1, ConvNormAct, [ 1024, 3, 2 ] ],  # 9-P5/32
+      [ -1, 4, Bottleneck, [ 1024 ] ],  # 10
+    ]
+

+

编写mymodel.py文件:

+

模块导入

+

需要导入套件内的模块。 如from .registry import register_model等等

+
import numpy as np
+
+import mindspore as ms
+from mindspore import Tensor, nn
+
+
+from .initializer import initialize_defult #用于初始化模型的默认参数,包括权重初始化方式、BN 层参数等。
+from .model_factory import build_model_from_cfg #用于根据 YAML 配置文件中的参数构建目标检测模型,并返回该模型的实例。
+from .registry import register_model #用于将自定义的模型注册到 Mindyolo 中,以便在 YAML 配置文件中使用。
+
+#可见性声明
+__all__ = ["MYmodel", "mymodel"]
+
+

创建配置字典

+

_cfg函数是一个辅助函数,用于创建配置字典。它接受一个url参数和其他关键字参数,并返回一个包含url和其他参数的字典。
+default_cfgs是一个字典,用于存储默认配置。在这里,mymodel作为键,使用_cfg函数创建了一个配置字典。 +

def _cfg(url="", **kwargs):
+    return {"url": url, **kwargs}
+
+default_cfgs = {"mymodel": _cfg(url="")}
+

+

创建模型

+

MindSpore中,模型的类继承于nn.Cell,一般来说需要重载以下两个函数:

+
    +
  • __init__函数中,应当定义模型中需要用到的module层。
  • +
  • construct函数中定义模型前向逻辑。
  • +
+
class MYmodel(nn.Cell):
+
+    def __init__(self, cfg, in_channels=3, num_classes=None, sync_bn=False):
+        super(MYmodel, self).__init__()
+        self.cfg = cfg
+        self.stride = Tensor(np.array(cfg.stride), ms.int32)
+        self.stride_max = int(max(self.cfg.stride))
+        ch, nc = in_channels, num_classes
+
+        self.nc = nc  # override yaml value
+        self.model = build_model_from_cfg(model_cfg=cfg, in_channels=ch, num_classes=nc, sync_bn=sync_bn)
+        self.names = [str(i) for i in range(nc)]  # default names
+
+        initialize_defult()  # 可选,你可能需要initialize_defult方法以获得和pytorch一样的conv2d、dense层的初始化方式;
+
+    def construct(self, x):
+        return self.model(x)
+
+

注册模型(可选)

+

如果需要使用mindyolo接口初始化自定义的模型,那么需要先对模型进行**注册**和**导入**

+

模型注册
+

@register_model #注册后的模型可以通过 create_model 接口以模型名的方式进行访问;
+def mymodel(cfg, in_channels=3, num_classes=None, **kwargs) -> MYmodel:
+    """Get GoogLeNet model.
+    Refer to the base class `models.GoogLeNet` for more details."""
+    model = MYmodel(cfg=cfg, in_channels=in_channels, num_classes=num_classes, **kwargs)
+    return model
+
+模型导入

+
#在mindyolo/models/__init__.py文件中添加以下代码
+
+from . import mymodel #mymodel.py文件通常放在mindyolo/models/目录下
+__all__.extend(mymodel.__all__)
+from .mymodel import *
+
+

验证main

+

初始编写阶段应当保证模型是可运行的。可通过下述代码块进行基础验证: +首先导入所需的模块和函数。然后,通过 parse_config 解析得到配置对象。

+

if __name__ == "__main__":
+    from mindyolo.models.model_factory import create_model
+    from mindyolo.utils.config import parse_config
+
+    opt = parse_config()
+
+创建模型并指定相关参数,注意:如果要在create_model中使用文件名创建自定义的模型,那么需要先使用注册器@register_model进行注册,请参见上文 注册模型(可选)部分内容 +
    model = create_model(
+        model_name="mymodel",
+        model_cfg=opt.net,
+        num_classes=opt.data.nc,
+        sync_bn=opt.sync_bn if hasattr(opt, "sync_bn") else False,
+    ) 
+

+

否则,请使用import的方式引入模型

+

    from mindyolo.models.mymodel import MYmodel
+    model = MYmodel(
+        model_name="mymodel",
+        model_cfg=opt.net,
+        num_classes=opt.data.nc,
+        sync_bn=opt.sync_bn if hasattr(opt, "sync_bn") else False,
+    ) 
+
+最后,创建一个输入张量x并将其传递给模型进行前向计算。 +
    x = Tensor(np.random.randn(1, 3, 640, 640), ms.float32)
+    out = model(x)
+    out = out[0] if isinstance(out, (list, tuple)) else out
+    print(f"Output shape is {[o.shape for o in out]}")
+

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/index.html b/zh/index.html new file mode 100644 index 00000000..724480b9 --- /dev/null +++ b/zh/index.html @@ -0,0 +1,1164 @@ + + + + + + + + + + + + + + + + + + + + + + 主页 - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + +

MindYOLO

+

+ + docs + + + GitHub + + + PRs Welcome + +

+ +

MindYOLO是MindSpore Lab开发的AI套件,实现了最先进的YOLO系列算法,查看支持的模型算法

+

MindYOLO使用Python语言编写,基于 MindSpore AI框架开发。

+

master 分支配套 MindSpore 2.0

+

+

新特性

+
    +
  • 2023/06/15
  • +
+
    +
  1. 支持 YOLOv3/v4/v5/v7/v8/X 等6个模型,发布了23个模型weights,详情请参考 MODEL ZOO
  2. +
  3. 配套 MindSpore 2.0。
  4. +
  5. 支持 MindSpore lite 2.0 推理。
  6. +
  7. 新的教程文档上线!
  8. +
+

基准和模型仓库

+

查看 MODEL ZOO.

+
+支持的算法 + +
+

安装

+

查看 INSTALLATION

+

快速入门

+

查看 GETTING STARTED

+

了解 MindYOLO 的更多信息

+

敬请期待

+

注意

+

⚠️当前版本基于GRAPH的静态Shape。后续将添加PYNATIVE的动态Shape支持,敬请期待。

+

贡献方式

+

我们感谢开发者用户的所有贡献,包括提issue和PR,一起让MindYOLO变得更好。

+

贡献指南请参考CONTRIBUTING.md

+

许可证

+

MindYOLO遵循Apache License 2.0开源协议。

+

致谢

+

MindYOLO是一个欢迎任何贡献和反馈的开源项目。我们希望通过提供灵活且标准化的工具包来重新实现现有方法和开发新的实时目标检测方法,从而为不断发展的研究社区服务。

+

引用

+

如果你觉得MindYOLO对你的项目有帮助,请考虑引用:

+
@misc{MindSpore Object Detection YOLO 2023,
+    title={{MindSpore Object Detection YOLO}:MindSpore Object Detection YOLO Toolbox and Benchmark},
+    author={MindSpore YOLO Contributors},
+    howpublished = {\url{https://github.com/mindspore-lab/mindyolo}},
+    year={2023}
+}
+
+ + + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/installation/index.html b/zh/installation/index.html new file mode 100644 index 00000000..14014c36 --- /dev/null +++ b/zh/installation/index.html @@ -0,0 +1,960 @@ + + + + + + + + + + + + + + + + + + + + 安装 - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + + + + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

安装

+

依赖

+
    +
  • mindspore >= 2.0
  • +
  • numpy >= 1.17.0
  • +
  • pyyaml >= 5.3
  • +
  • openmpi 4.0.3 (分布式训练所需)
  • +
+

为了安装python相关库依赖,只需运行:

+
pip install -r requirements.txt
+
+

MindSpore可以通过遵循官方指引,在不同的硬件平台上获得最优的安装体验。 为了在分布式模式下运行,您还需要安装OpenMPI

+

⚠️ 当前版本仅支持Ascend平台,GPU会在后续支持,敬请期待。

+

PyPI源安装

+

MindYOLO 发布为一个Python包并能够通过pip进行安装。我们推荐您在虚拟环境安装使用。 打开终端,输入以下指令来安装 MindYOLO:

+
pip install mindyolo
+
+

源码安装 (未经测试版本)

+

from VCS

+
pip install git+https://github.com/mindspore-lab/mindyolo.git
+
+

from local src

+

由于本项目处于活跃开发阶段,如果您是开发者或者贡献者,请优先选择此安装方式。

+

MindYOLO 可以在由 GitHub 克隆仓库到本地文件夹后直接使用。 这对于想使用最新版本的开发者十分方便:

+
git clone https://github.com/mindspore-lab/mindyolo.git
+
+

在克隆到本地之后,推荐您使用"可编辑"模式进行安装,这有助于解决潜在的模块导入问题。

+
cd mindyolo
+pip install -e .
+
+

另外, 我们提供了一个可选的 fast coco api 接口用于提升验证过程的速度。代码是以C++形式提供的,可以尝试用以下的命令进行安装 (此操作是可选的) :

+
cd mindyolo/csrc
+sh build.sh
+
+ + + + + + + + +
+
+ + + + +
+ + + +
+ +
+ + + + +
+ +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/modelzoo/index.html b/zh/modelzoo/index.html new file mode 100644 index 00000000..628679bb --- /dev/null +++ b/zh/modelzoo/index.html @@ -0,0 +1,1239 @@ + + + + + + + + + + + + + + + + + + + + + + + + Benchmark - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

模型仓库

+ +

MindYOLO Model Zoo and Baselines

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameScaleContextImageSizeDatasetBox mAP (%)ParamsFLOPsRecipeDownload
YOLOv8ND910x8-G640MS COCO 201737.23.2M8.7Gyamlweights
YOLOv8SD910x8-G640MS COCO 201744.611.2M28.6Gyamlweights
YOLOv8MD910x8-G640MS COCO 201750.525.9M78.9Gyamlweights
YOLOv8LD910x8-G640MS COCO 201752.843.7M165.2Gyamlweights
YOLOv8XD910x8-G640MS COCO 201753.768.2M257.8Gyamlweights
YOLOv7TinyD910x8-G640MS COCO 201737.56.2M13.8Gyamlweights
YOLOv7LD910x8-G640MS COCO 201750.836.9M104.7Gyamlweights
YOLOv7XD910x8-G640MS COCO 201752.471.3M189.9Gyamlweights
YOLOv5ND910x8-G640MS COCO 201727.31.9M4.5Gyamlweights
YOLOv5SD910x8-G640MS COCO 201737.67.2M16.5Gyamlweights
YOLOv5MD910x8-G640MS COCO 201744.921.2M49.0Gyamlweights
YOLOv5LD910x8-G640MS COCO 201748.546.5M109.1Gyamlweights
YOLOv5XD910x8-G640MS COCO 201750.586.7M205.7Gyamlweights
YOLOv4CSPDarknet53D910x8-G608MS COCO 201745.427.6M52Gyamlweights
YOLOv4CSPDarknet53(silu)D910x8-G608MS COCO 201745.827.6M52Gyamlweights
YOLOv3Darknet53D910x8-G640MS COCO 201745.561.9M156.4Gyamlweights
YOLOXND910x8-G416MS COCO 201724.10.9M1.1Gyamlweights
YOLOXTinyD910x8-G416MS COCO 201733.35.1M6.5Gyamlweights
YOLOXSD910x8-G640MS COCO 201740.79.0M26.8Gyamlweights
YOLOXMD910x8-G640MS COCO 201746.725.3M73.8Gyamlweights
YOLOXLD910x8-G640MS COCO 201749.254.2M155.6Gyamlweights
YOLOXXD910x8-G640MS COCO 201751.699.1M281.9Gyamlweights
YOLOXDarknet53D910x8-G640MS COCO 201747.763.7M185.3Gyamlweights
+


+

Deploy inference

+ +

Notes

+
    +
  • Context: Training context denoted as {device}x{pieces}-{MS mode}, where mindspore mode can be G - graph mode or F - pynative mode with ms function. For example, D910x8-G is for training on 8 pieces of Ascend 910 NPU using graph mode.
  • +
  • Box mAP: Accuracy reported on the validation set.
  • +
+ + + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/notes/changelog/index.html b/zh/notes/changelog/index.html new file mode 100644 index 00000000..f9882868 --- /dev/null +++ b/zh/notes/changelog/index.html @@ -0,0 +1,927 @@ + + + + + + + + + + + + + + + + + + + + + + + + 更新日志 - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

更新日志

+

即将到来

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/notes/code_of_conduct/index.html b/zh/notes/code_of_conduct/index.html new file mode 100644 index 00000000..94b444f6 --- /dev/null +++ b/zh/notes/code_of_conduct/index.html @@ -0,0 +1,927 @@ + + + + + + + + + + + + + + + + + + + + + + + + 行为准则 - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

行为准则

+

即将到来

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/notes/contributing/index.html b/zh/notes/contributing/index.html new file mode 100644 index 00000000..538ec2d5 --- /dev/null +++ b/zh/notes/contributing/index.html @@ -0,0 +1,1192 @@ + + + + + + + + + + + + + + + + + + + + + + + + Contributing - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + + +

MindYOLO contributing guidelines

+ + + + + +

Contributor License Agreement

+

It's required to sign CLA before your first code submission to MindYOLO community.

+

For individual contributor, please refer to ICLA online document for the detailed information.

+

Getting Started

+ +

Contribution Workflow

+

Code style

+

Please follow this style to make MindYOLO easy to review, maintain and develop.

+
    +
  • +

    Coding guidelines

    +

    The Python coding style suggested by Python PEP 8 Coding Style and C++ coding style suggested by Google C++ Coding Guidelines are used in MindYOLO community. The CppLint, CppCheck, CMakeLint, CodeSpell, Lizard, ShellCheck and PyLint are used to check the format of codes, installing these plugins in your IDE is recommended.

    +
  • +
  • +

    Unittest guidelines

    +

    The Python unittest style suggested by pytest and C++ unittest style suggested by Googletest Primer are used in MindYOLO community. The design intent of a testcase should be reflected by its name of comment.

    +
  • +
  • +

    Refactoring guidelines

    +

    We encourage developers to refactor our code to eliminate the code smell. All codes should conform to needs to the coding style and testing style, and refactoring codes are no exception. Lizard threshold for nloc (lines of code without comments) is 100 and for cnc (cyclomatic complexity number) is 20, when you receive a Lizard warning, you have to refactor the code you want to merge.

    +
  • +
  • +

    Document guidelines

    +

    We use MarkdownLint to check the format of markdown documents. MindYOLO CI modifies the following rules based on the default configuration. +- MD007 (unordered list indentation): The indent parameter is set to 4, indicating that all content in the unordered list needs to be indented using four spaces. +- MD009 (spaces at the line end): The br_spaces parameter is set to 2, indicating that there can be 0 or 2 spaces at the end of a line. +- MD029 (sequence numbers of an ordered list): The style parameter is set to ordered, indicating that the sequence numbers of the ordered list are in ascending order.

    +

    For details, please refer to RULES.

    +
  • +
+

Fork-Pull development model

+
    +
  • +

    Fork MindYOLO repository

    +

    Before submitting code to MindYOLO project, please make sure that this project have been forked to your own repository. It means that there will be parallel development between MindYOLO repository and your own repository, so be careful to avoid the inconsistency between them.

    +
  • +
  • +

    Clone the remote repository

    +

    If you want to download the code to the local machine, git is the best way:

    +
    # For GitHub
    +git clone https://github.com/{insert_your_forked_repo}/mindyolo.git
    +git remote add upstream https://github.com/mindspore-lab/mindyolo.git
    +
    +
  • +
  • +

    Develop code locally

    +

    To avoid inconsistency between multiple branches, checking out to a new branch is SUGGESTED:

    +
    git checkout -b {new_branch_name} origin/master
    +
    +

    Taking the master branch as an example, MindYOLO may create version branches and downstream development branches as needed, please fix bugs upstream first. +Then you can change the code arbitrarily.

    +
  • +
  • +

    Push the code to the remote repository

    +

    After updating the code, you should push the update in the formal way:

    +
    git add .
    +git status # Check the update status
    +git commit -m "Your commit title"
    +git commit -s --amend #Add the concrete description of your commit
    +git push origin {new_branch_name}
    +
    +
  • +
  • +

    Pull a request to MindYOLO repository

    +

    In the last step, your need to pull a compare request between your new branch and MindYOLO master branch. After finishing the pull request, the Jenkins CI will be automatically set up for building test. Your pull request should be merged into the upstream master branch as soon as possible to reduce the risk of merging.

    +
  • +
+

Report issues

+

A great way to contribute to the project is to send a detailed report when you encounter an issue. We always appreciate a well-written, thorough bug report, and will thank you for it!

+

When reporting issues, refer to this format:

+
    +
  • What version of env (MindSpore, os, python, MindYOLO etc) are you using?
  • +
  • Is this a BUG REPORT or FEATURE REQUEST?
  • +
  • What kind of issue is, add the labels to highlight it on the issue dashboard.
  • +
  • What happened?
  • +
  • What you expected to happen?
  • +
  • How to reproduce it?(as minimally and precisely as possible)
  • +
  • Special notes for your reviewers?
  • +
+

Issues advisory:

+
    +
  • If you find an unclosed issue, which is exactly what you are going to solve, please put some comments on that issue to tell others you would be in charge of it.
  • +
  • If an issue is opened for a while, it's recommended for contributors to precheck before working on solving that issue.
  • +
  • If you resolve an issue which is reported by yourself, it's also required to let others know before closing that issue.
  • +
  • If you want the issue to be responded as quickly as possible, please try to label it, you can find kinds of labels on Label List
  • +
+

Propose PRs

+
    +
  • Raise your idea as an issue on GitHub
  • +
  • If it is a new feature that needs lots of design details, a design proposal should also be submitted.
  • +
  • After reaching consensus in the issue discussions and design proposal reviews, complete the development on the forked repo and submit a PR.
  • +
  • None of PRs is not permitted until it receives 2+ LGTM from approvers. Please NOTICE that approver is NOT allowed to add LGTM on his own PR.
  • +
  • After PR is sufficiently discussed, it will get merged, abandoned or rejected depending on the outcome of the discussion.
  • +
+

PRs advisory:

+
    +
  • Any irrelevant changes should be avoided.
  • +
  • Make sure your commit history being ordered.
  • +
  • Always keep your branch up with the master branch.
  • +
  • For bug-fix PRs, make sure all related issues being linked.
  • +
+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/notes/faq/index.html b/zh/notes/faq/index.html new file mode 100644 index 00000000..fffd77c4 --- /dev/null +++ b/zh/notes/faq/index.html @@ -0,0 +1,910 @@ + + + + + + + + + + + + + + + + + + + + + + 常见问题 - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

常见问题

+

即将到来

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/reference/data/index.html b/zh/reference/data/index.html new file mode 100644 index 00000000..f09d0ac9 --- /dev/null +++ b/zh/reference/data/index.html @@ -0,0 +1,973 @@ + + + + + + + + + + + + + + + + + + + + + + + + data - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/reference/loss/index.html b/zh/reference/loss/index.html new file mode 100644 index 00000000..b018b554 --- /dev/null +++ b/zh/reference/loss/index.html @@ -0,0 +1,888 @@ + + + + + + + + + + + + + + + + + + + + Loss - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ +
+ + + + +
+ +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/reference/models/index.html b/zh/reference/models/index.html new file mode 100644 index 00000000..687e1c1a --- /dev/null +++ b/zh/reference/models/index.html @@ -0,0 +1,1416 @@ + + + + + + + + + + + + + + + + + + + + + + + + models - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Models

+

Create Model

+ + + +
+ + + +

+mindyolo.models.model_factory.create_model(model_name, model_cfg=None, in_channels=3, num_classes=80, checkpoint_path='', **kwargs) + +

+ + +
+ +
+ Source code in mindyolo/models/model_factory.py +
15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
def create_model(
+    model_name: str,
+    model_cfg: dict = None,
+    in_channels: int = 3,
+    num_classes: int = 80,
+    checkpoint_path: str = "",
+    **kwargs,
+):
+    model_args = dict(cfg=model_cfg, num_classes=num_classes, in_channels=in_channels)
+    kwargs = {k: v for k, v in kwargs.items() if v is not None}
+
+    if not is_model(model_name):
+        raise RuntimeError(f"Unknown model {model_name}")
+
+    create_fn = model_entrypoint(model_name)
+    model = create_fn(**model_args, **kwargs)
+
+    if checkpoint_path:
+        assert os.path.isfile(checkpoint_path) and checkpoint_path.endswith(
+            ".ckpt"
+        ), f"[{checkpoint_path}] not a ckpt file."
+        checkpoint_param = load_checkpoint(checkpoint_path)
+        load_param_into_net(model, checkpoint_param)
+        logger.info(f"Load checkpoint from [{checkpoint_path}] success.")
+
+    return model
+
+
+
+ +

yolov3_head

+

yolov4_head

+

yolov5_head

+

yolov7_head

+

yolov8_head

+

yolox_head

+

initializer

+

focal_loss

+

iou_loss

+

label_assignment

+

loss_factory

+

yolov3_loss

+

yolov4_loss

+

yolov5_loss

+

yolov7_loss

+

yolov8_loss

+

yolox_loss

+

yolov3

+

yolov4

+

yolov5

+

yolov7

+

yolov8

+

yolox

+ + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/tutorials/configuration/index.html b/zh/tutorials/configuration/index.html new file mode 100644 index 00000000..568bdcde --- /dev/null +++ b/zh/tutorials/configuration/index.html @@ -0,0 +1,1145 @@ + + + + + + + + + + + + + + + + + + + + + + + + 配置 - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

配置

+ +

配置

+

MindYOLO套件同时支持yaml文件参数和命令行参数解析,并将相对固定、与模型强相关、较为复杂或者含有嵌套结构的参数编写成yaml文件,需根据实际应用场景更改或者较为简单的参数则通过命令行传入。

+

下面以yolov3为例,解释如何配置相应的参数。

+

参数继承关系

+

参数优先级由高到低如下,出现同名参数时,低优先级参数会被高优先级参数覆盖

+
    +
  • 用户命令行传入参数
  • +
  • python执行py文件中parser的默认参数
  • +
  • 命令行传入config参数对应的yaml文件参数
  • +
  • 命令行传入config参数对应的yaml文件中__BASE__参数中包含的yaml文件参数,例如yolov3.yaml含有如下参数: +
    __BASE__: [
    +  '../coco.yaml',
    +  './hyp.scratch.yaml',
    +]
    +
  • +
+

基础参数

+

参数说明

+
    +
  • device_target: 所用设备,Ascend/GPU/CPU
  • +
  • save_dir: 运行结果保存路径,默认为./runs
  • +
  • log_interval: 打印日志step间隔,默认为100
  • +
  • is_parallel: 是否分布式训练,默认为False
  • +
  • ms_mode: 使用静态图模式(0)或动态图模式(1),默认为0。
  • +
  • config: yaml配置文件路径
  • +
  • per_batch_size: 每张卡batch size,默认为32
  • +
  • epochs: 训练epoch数,默认为300
  • +
  • ...
  • +
+

parse参数设置

+

该部分参数通常由命令行传入,示例如下:

+
mpirun --allow-run-as-root -n 8 python train.py --config ./configs/yolov7/yolov7.yaml  --is_parallel True --log_interval 50
+
+

数据集

+

参数说明

+
    +
  • dataset_name: 数据集名称
  • +
  • train_set: 训练集所在路径
  • +
  • val_set: 验证集所在路径
  • +
  • test_set: 测试集所在路径
  • +
  • nc: 数据集类别数
  • +
  • names: 类别名称
  • +
  • ...
  • +
+

yaml文件样例

+

该部分参数在configs/coco.yaml中定义,通常需修改其中的数据集路径

+

```yaml +data: + dataset_name: coco

+

train_set: ./coco/train2017.txt # 118287 images + val_set: ./coco/val2017.txt # 5000 images + test_set: ./coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

+

nc: 80

+

# class names + names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush' ] + ```

+

数据增强

+

参数说明

+
    +
  • num_parallel_workers: 读取数据的工作进程数
  • +
  • train_transformers: 训练过程数据增强
  • +
  • test_transformers: 验证过程数据增强
  • +
  • ...
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/hyp.scratch.yaml中定义,其中train_transformers和test_transformers均为由字典组成的列表,各字典包含数据增强操作名称、发生概率及该增强方法相关的参数

+

```yaml +data: + num_parallel_workers: 4

+

train_transforms: + - { func_name: mosaic, prob: 1.0, mosaic9_prob: 0.0, translate: 0.1, scale: 0.9 } + - { func_name: mixup, prob: 0.1, alpha: 8.0, beta: 8.0, needed_mosaic: True } + - { func_name: hsv_augment, prob: 1.0, hgain: 0.015, sgain: 0.7, vgain: 0.4 } + - { func_name: label_norm, xyxy2xywh_: True } + - { func_name: albumentations } + - { func_name: fliplr, prob: 0.5 } + - { func_name: label_pad, padding_size: 160, padding_value: -1 } + - { func_name: image_norm, scale: 255. } + - { func_name: image_transpose, bgr2rgb: True, hwc2chw: True }

+

test_transforms: + - { func_name: letterbox, scaleup: False } + - { func_name: label_norm, xyxy2xywh_: True } + - { func_name: label_pad, padding_size: 160, padding_value: -1 } + - { func_name: image_norm, scale: 255. } + - { func_name: image_transpose, bgr2rgb: True, hwc2chw: True } + ```

+

模型

+

参数说明

+
    +
  • model_name: 模型名称
  • +
  • depth_multiple: 模型深度因子
  • +
  • width_multiple: 模型宽度因子
  • +
  • stride: 特征图下采样倍数
  • +
  • anchors: 预设锚框
  • +
  • backbone: 模型骨干网络
  • +
  • head: 模型检测头
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/yolov3.yaml中定义,根据backbone和head参数进行网络构建,参数以嵌套列表的形式呈现,每行代表一层模块,包含4个参数,分别是 输入层编号(-1代表上一层)、模块重复次数、模块名称和模块相应参数。用户也可以不借助yaml文件而直接在py文件中定义和注册网络。 +```yaml +network: + model_name: yolov3

+

depth_multiple: 1.0 # model depth multiple + width_multiple: 1.0 # layer channel multiple + stride: [8, 16, 32] + anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32

+

# darknet53 backbone + backbone: + # [from, number, module, args] + [[-1, 1, ConvNormAct, [32, 3, 1]], # 0 + [-1, 1, ConvNormAct, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, ConvNormAct, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, ConvNormAct, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, ConvNormAct, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, ConvNormAct, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ]

+

# YOLOv3 head + head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, ConvNormAct, [512, 1, 1]], + [-1, 1, ConvNormAct, [1024, 3, 1]], + [-1, 1, ConvNormAct, [512, 1, 1]], + [-1, 1, ConvNormAct, [1024, 3, 1]], # 15 (P5/32-large)

+
 [-2, 1, ConvNormAct, [256, 1, 1]],
+ [-1, 1, Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, ConvNormAct, [256, 1, 1]],
+ [-1, 1, ConvNormAct, [512, 3, 1]],  # 22 (P4/16-medium)
+
+ [-2, 1, ConvNormAct, [128, 1, 1]],
+ [-1, 1, Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]],  # cat backbone P3
+ [-1, 1, Bottleneck, [256, False]],
+ [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
+
+ [[27, 22, 15], 1, YOLOv3Head, [nc, anchors, stride]],   # Detect(P3, P4, P5)
+]
+
+

```

+

损失函数

+

参数说明

+
    +
  • name: 损失函数名称
  • +
  • box: box损失权重
  • +
  • cls: class损失权重
  • +
  • cls_pw: class损失正样本权重
  • +
  • obj: object损失权重
  • +
  • obj_pw: object损失正样本权重
  • +
  • fl_gamma: focal loss gamma
  • +
  • anchor_t: anchor shape比例阈值
  • +
  • label_smoothing: 标签平滑值
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/hyp.scratch.yaml中定义

+

yaml +loss: + name: YOLOv7Loss + box: 0.05 # box loss gain + cls: 0.5 # cls loss gain + cls_pw: 1.0 # cls BCELoss positive_weight + obj: 1.0 # obj loss gain (scale with pixels) + obj_pw: 1.0 # obj BCELoss positive_weight + fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) + anchor_t: 4.0 # anchor-multiple threshold + label_smoothing: 0.0 # label smoothing epsilon

+

优化器

+

参数说明

+
    +
  • optimizer: 优化器名称。
  • +
  • lr_init: 学习率初始值
  • +
  • warmup_epochs: warmup epoch数
  • +
  • warmup_momentum: warmup momentum初始值
  • +
  • warmup_bias_lr: warmup bias学习率初始值
  • +
  • min_warmup_step: 最小warmup step数
  • +
  • group_param: 参数分组策略
  • +
  • gp_weight_decay: 分组参数权重衰减系数
  • +
  • start_factor: 初始学习率因数
  • +
  • end_factor: 结束学习率因数
  • +
  • momentum:移动平均的动量
  • +
  • loss_scale:loss缩放系数
  • +
  • nesterov:是否使用Nesterov Accelerated Gradient (NAG)算法更新梯度。
  • +
+

yaml文件样例

+

该部分参数在configs/yolov3/hyp.scratch.yaml中定义,如下示例中经过warmup阶段后的初始学习率为lr_init * start_factor = 0.01 * 1.0 = 0.01, 最终学习率为lr_init * end_factor = 0.01 * 0.01 = 0.0001

+

yaml +optimizer: + optimizer: momentum + lr_init: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) + momentum: 0.937 # SGD momentum/Adam beta1 + nesterov: True # update gradients with NAG(Nesterov Accelerated Gradient) algorithm + loss_scale: 1.0 # loss scale for optimizer + warmup_epochs: 3 # warmup epochs (fractions ok) + warmup_momentum: 0.8 # warmup initial momentum + warmup_bias_lr: 0.1 # warmup initial bias lr + min_warmup_step: 1000 # minimum warmup step + group_param: yolov7 # group param strategy + gp_weight_decay: 0.0005 # group param weight decay 5e-4 + start_factor: 1.0 + end_factor: 0.01

+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/tutorials/deployment/index.html b/zh/tutorials/deployment/index.html new file mode 100644 index 00000000..12d92728 --- /dev/null +++ b/zh/tutorials/deployment/index.html @@ -0,0 +1,1277 @@ + + + + + + + + + + + + + + + + + + + + + + + + 部署 - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

部署

+ +

MindYOLO部署

+

依赖

+
pip install -r requirement.txt
+
+

MindSpore Lite环境准备

+

参考:Lite环境配置
+ 注意:MindSpore Lite适配的python环境为3.7,请在安装Lite前准备好python3.7的环境
+ 1. 根据环境,下载配套的tar.gz包和whl包 + 2. 解压tar.gz包并安装对应版本的whl包 +

tar -zxvf mindspore_lite-2.0.0a0-cp37-cp37m-{os}_{platform}_64.tar.gz
+pip install mindspore_lite-2.0.0a0-cp37-cp37m-{os}_{platform}_64.whl
+
+ 3. 配置Lite的环境变量 + LITE_HOME为tar.gz解压出的文件夹路径,推荐使用绝对路径 +
export LITE_HOME=/path/to/mindspore-lite-{version}-{os}-{platform}
+export LD_LIBRARY_PATH=$LITE_HOME/runtime/lib:$LITE_HOME/tools/converter/lib:$LD_LIBRARY_PATH
+export PATH=$LITE_HOME/tools/converter/converter:$LITE_HOME/tools/benchmark:$PATH
+

+

快速开始

+

模型转换

+

ckpt模型转为mindir模型,此步骤可在CPU/Ascend910上运行 +

python ./deploy/export.py --config ./path_to_config/model.yaml --weight ./path_to_ckpt/weight.ckpt --per_batch_size 1 --file_format MINDIR --device_target [CPU/Ascend]
+e.g.
+# 在CPU上运行
+python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target CPU
+# 在Ascend上运行
+python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target Ascend
+

+

Lite Test

+
python deploy/test.py --model_type Lite --model_path ./path_to_mindir/weight.mindir --config ./path_to_config/yolo.yaml
+e.g.
+python deploy/test.py --model_type Lite --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml
+
+

Lite Predict

+
python ./deploy/predict.py --model_type Lite --model_path ./path_to_mindir/weight.mindir --config ./path_to_conifg/yolo.yaml --image_path ./path_to_image/image.jpg
+e.g.
+python deploy/predict.py --model_type Lite --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml --image_path ./coco/image/val2017/image.jpg
+
+

脚本说明

+
    +
  • predict.py 支持单张图片推理
  • +
  • test.py 支持COCO数据集推理
  • +
+

MindX部署

+

查看 MINDX

+

标准和支持的模型库

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameScaleContextImageSizeDatasetBox mAP (%)ParamsFLOPsRecipeDownload
YOLOv8ND310x1-G640MS COCO 201737.23.2M8.7Gyamlckpt
mindir
YOLOv8SD310x1-G640MS COCO 201744.611.2M28.6Gyamlckpt
mindir
YOLOv8MD310x1-G640MS COCO 201750.525.9M78.9Gyamlckpt
mindir
YOLOv8LD310x1-G640MS COCO 201752.843.7M165.2Gyamlckpt
mindir
YOLOv8XD310x1-G640MS COCO 201753.768.2M257.8Gyamlckpt
mindir
YOLOv7TinyD310x1-G640MS COCO 201737.56.2M13.8Gyamlckpt
mindir
YOLOv7LD310x1-G640MS COCO 201750.836.9M104.7Gyamlckpt
mindir
YOLOv7XD310x1-G640MS COCO 201752.471.3M189.9Gyamlckpt
mindir
YOLOv5ND310x1-G640MS COCO 201727.31.9M4.5Gyamlckpt
mindir
YOLOv5SD310x1-G640MS COCO 201737.67.2M16.5Gyamlckpt
mindir
YOLOv5MD310x1-G640MS COCO 201744.921.2M49.0Gyamlckpt
mindir
YOLOv5LD310x1-G640MS COCO 201748.546.5M109.1Gyamlckpt
mindir
YOLOv5XD310x1-G640MS COCO 201750.586.7M205.7Gyamlckpt
mindir
YOLOv4CSPDarknet53D310x1-G608MS COCO 201745.427.6M52Gyamlckpt
mindir
YOLOv4CSPDarknet53(silu)D310x1-G640MS COCO 201745.827.6M52Gyamlckpt
mindir
YOLOv3Darknet53D310x1-G640MS COCO 201745.561.9M156.4Gyamlckpt
mindir
YOLOXND310x1-G416MS COCO 201724.10.9M1.1Gyamlckpt
mindir
YOLOXTinyD310x1-G416MS COCO 201733.35.1M6.5Gyamlckpt
mindir
YOLOXSD310x1-G640MS COCO 201740.79.0M26.8Gyamlckpt
mindir
YOLOXMD310x1-G640MS COCO 201746.725.3M73.8Gyamlckpt
mindir
YOLOXLD310x1-G640MS COCO 201749.254.2M155.6Gyamlckpt
mindir
YOLOXXD310x1-G640MS COCO 201751.699.1M281.9Gyamlckpt
mindir
YOLOXDarknet53D310x1-G640MS COCO 201747.763.7M185.3Gyamlckpt
mindir
+


+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/tutorials/finetune/index.html b/zh/tutorials/finetune/index.html new file mode 100644 index 00000000..35b437a0 --- /dev/null +++ b/zh/tutorials/finetune/index.html @@ -0,0 +1,1080 @@ + + + + + + + + + + + + + + + + + + + + + + + + 微调 - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

微调

+ +

自定义数据集finetune流程

+

本文以安全帽佩戴检测数据集(SHWD)为例,介绍自定义数据集在MindYOLO上进行finetune的主要流程。

+

数据集格式转换

+

SHWD数据集采用voc格式的数据标注,其文件目录如下所示: +

             ROOT_DIR
+                ├── Annotations
+                │        ├── 000000.xml
+                │        └── 000002.xml
+                ├── ImageSets
+                │       └── Main
+                │             ├── test.txt
+                │             ├── train.txt
+                │             ├── trainval.txt
+                │             └── val.txt
+                └── JPEGImages
+                        ├── 000000.jpg
+                        └── 000002.jpg
+
+其中,ImageSets/Main文件下的txt文件中每行代表相应子集中单张图片不含后缀的文件名,例如: +
000002
+000005
+000019
+000022
+000027
+000034
+

+

由于MindYOLO在验证阶段选用图片名称作为image_id,因此图片名称只能为数值类型,而不能为字符串类型,还需要对图片进行改名。对SHWD数据集格式的转换包含如下步骤: +* 将图片复制到相应的路径下并改名 +* 在根目录下相应的txt文件中写入该图片的相对路径 +* 解析xml文件,在相应路径下生成对应的txt标注文件 +* 验证集还需生成最终的json文件

+

详细实现可参考convert_shwd2yolo.py。运行方式如下:

+
python examples/finetune_SHWD/convert_shwd2yolo.py --root_dir /path_to_shwd/SHWD
+
+

运行以上命令将在不改变原数据集的前提下,在同级目录生成yolo格式的SHWD数据集。

+

预训练模型文件转换

+

由于SHWD数据集只有7000+张图片,选择yolov7-tiny进行该数据集的训练,可下载MindYOLO提供的在coco数据集上训练好的模型文件作为预训练模型。由于coco数据集含有80种物体类别,SHWD数据集只有两类,模型的最后一层head层输出与类别数nc有关,因此需将预训练模型文件的最后一层去掉, 可参考convert_yolov7-tiny_pretrain_ckpt.py。运行方式如下:

+
python examples/finetune_SHWD/convert_yolov7-tiny_pretrain_ckpt.py
+
+

模型微调(Finetune)

+

简要的训练流程可参考finetune_shwd.py

+
    +
  • 在多卡NPU/GPU上进行分布式模型训练,以8卡为例:
  • +
+
mpirun --allow-run-as-root -n 8 python examples/finetune_SHWD/finetune_shwd.py --config ./examples/finetune_SHWD/yolov7-tiny_shwd.yaml --is_parallel True
+
+
    +
  • 在单卡NPU/GPU/CPU上训练模型:
  • +
+
python examples/finetune_SHWD/finetune_shwd.py --config ./examples/finetune_SHWD/yolov7-tiny_shwd.yaml 
+
+

注意:直接用yolov7-tiny默认coco参数在SHWD数据集上训练,可取得AP50 87.0的精度。将lr_init参数由0.01改为0.001,即可实现ap50为89.2的精度结果。

+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/tutorials/modelarts/index.html b/zh/tutorials/modelarts/index.html new file mode 100644 index 00000000..3f4419c6 --- /dev/null +++ b/zh/tutorials/modelarts/index.html @@ -0,0 +1,926 @@ + + + + + + + + + + + + + + + + + + + + + + + + 云上启动 - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/zh/tutorials/quick_start/index.html b/zh/tutorials/quick_start/index.html new file mode 100644 index 00000000..2b53fe85 --- /dev/null +++ b/zh/tutorials/quick_start/index.html @@ -0,0 +1,1106 @@ + + + + + + + + + + + + + + + + + + + + + + + + 快速开始 - MindYOLO Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

快速开始

+ +

MindYOLO 快速入门

+

本文简要介绍MindYOLO中内置的命令行工具的使用方法。

+

使用预训练模型进行推理

+
    +
  1. model zoo中选择一个模型及其配置文件,例如, ./configs/yolov7/yolov7.yaml.
  2. +
  3. model zoo中下载相应的预训练模型权重文件。
  4. +
  5. 使用内置配置进行推理,请运行以下命令:
  6. +
+
# NPU (默认)
+python demo/predict.py --config ./configs/yolov7/yolov7.yaml --weight=/path_to_ckpt/WEIGHT.ckpt --image_path /path_to_image/IMAGE.jpg
+
+# GPU
+python demo/predict.py --config ./configs/yolov7/yolov7.yaml --weight=/path_to_ckpt/WEIGHT.ckpt --image_path /path_to_image/IMAGE.jpg --device_target=GPU
+
+

有关命令行参数的详细信息,请参阅demo/predict.py -h,或查看其源代码

+
    +
  • 要在CPU上运行,请将device_target的值修改为CPU.
  • +
  • 结果将保存在./detect_results目录下
  • +
+

使用命令行进行训练和评估

+
    +
  • 按照YOLO格式准备您的数据集。如果使用COCO数据集(YOLO格式)进行训练,请从yolov5或darknet准备数据集.
  • +
+
+ +
  coco/
+    {train,val}2017.txt
+    annotations/
+      instances_{train,val}2017.json
+    images/
+      {train,val}2017/
+          00000001.jpg
+          ...
+          # image files that are mentioned in the corresponding train/val2017.txt
+    labels/
+      {train,val}2017/
+          00000001.txt
+          ...
+          # label files that are mentioned in the corresponding train/val2017.txt
+
+
+ +
    +
  • 在多卡NPU/GPU上进行分布式模型训练,以8卡为例:
  • +
+
mpirun --allow-run-as-root -n 8 python train.py --config ./configs/yolov7/yolov7.yaml  --is_parallel True
+
+
    +
  • 在单卡NPU/GPU/CPU上训练模型:
  • +
+
python train.py --config ./configs/yolov7/yolov7.yaml 
+
+
    +
  • 评估模型的精度:
  • +
+

python test.py --config ./configs/yolov7/yolov7.yaml --weight /path_to_ckpt/WEIGHT.ckpt
+
+注意:默认超参为8卡训练,单卡情况需调整部分参数。 默认设备为Ascend,您可以指定'device_target'的值为Ascend/GPU/CPU。 +* 有关更多选项,请参阅 train/test.py -h. +* 在云脑上进行训练,请在这里查看

+

部署

+

请在这里查看.

+

在代码中使用MindYOLO API

+

敬请期待

+ + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file