From 76c57958d7cec94221c0b12254452e06c79a6d14 Mon Sep 17 00:00:00 2001 From: Nathan Prouvost Date: Tue, 23 Jan 2024 16:40:26 +0100 Subject: [PATCH 01/12] onnx implementation and PlotRuntimesMultipleParams implementation before refactoring --- .../ONNXRuntimeModule/plugins/BuildFile.xml | 6 + .../plugins/ONNXPluginRuntime.cpp | 216 ++++++++++++++++++ .../test/onnx_runtime_template_cfg.py | 64 ++++++ .../RuntimeMeasurement/plugins/TFRuntime.cpp | 4 +- examples/cnn/model.json | 3 +- examples/dnn_2_inputs/model.json | 3 +- examples/simple_dnn/model.json | 3 +- examples/simple_dnn/model_onnx.json | 16 ++ examples/simple_dnn/simple_dnn.onnx | Bin 0 -> 103448 bytes mlprof/tasks/parameters.py | 4 +- mlprof/tasks/runtime.py | 53 +++-- 11 files changed, 350 insertions(+), 22 deletions(-) create mode 100644 cmssw/MLProf/ONNXRuntimeModule/plugins/BuildFile.xml create mode 100644 cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp create mode 100644 cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py create mode 100644 examples/simple_dnn/model_onnx.json create mode 100644 examples/simple_dnn/simple_dnn.onnx diff --git a/cmssw/MLProf/ONNXRuntimeModule/plugins/BuildFile.xml b/cmssw/MLProf/ONNXRuntimeModule/plugins/BuildFile.xml new file mode 100644 index 0000000..d807b98 --- /dev/null +++ b/cmssw/MLProf/ONNXRuntimeModule/plugins/BuildFile.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp b/cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp new file mode 100644 index 0000000..e1b973a --- /dev/null +++ b/cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp @@ -0,0 +1,216 @@ +/* + * Example plugin to demonstrate the direct multi-threaded inference with ONNX Runtime. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/stream/EDAnalyzer.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" + +#include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h" + +#include "MLProf/Utils/interface/utils.h" + +using namespace cms::Ort; + +class ONNXRuntimePlugin : public edm::stream::EDAnalyzer> { +public: + explicit ONNXRuntimePlugin(const edm::ParameterSet &, const ONNXRuntime *); + static void fillDescriptions(edm::ConfigurationDescriptions&); + + static std::unique_ptr initializeGlobalCache(const edm::ParameterSet &); + static void globalEndJob(const ONNXRuntime *); + +private: + void beginJob(); + void analyze(const edm::Event&, const edm::EventSetup&); + void endJob(); + + inline float drawNormal() { return normalPdf_(rndGen_); } + + // parameters + std::vector inputTensorNames_; + std::vector outputTensorNames_; + std::string outputFile_; + std::string inputTypeStr_; + std::vector inputRanks_; + std::vector flatInputSizes_; + int batchSize_; + int nCalls_; + + // other members + int nInputs_; + int nPreCalls_; + mlprof::InputType inputType_; + std::random_device rnd_; + std::default_random_engine rndGen_; + std::normal_distribution normalPdf_; + + std::vector> input_shapes_; + FloatArrays data_; // each stream hosts its own data +}; + + +void ONNXRuntimePlugin::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + // defining this function will lead to a *_cfi file being generated when compiling + edm::ParameterSetDescription desc; + // the path to the file containing the graph + desc.add("graphPath"); + // the names of the input tensors + desc.add>("inputTensorNames"); + // the names of the output tensors + desc.add>("outputTensorNames"); + // the name of the output csv file + 
desc.add("outputFile"); + // the type of input values, either "incremental" or "random" + desc.add("inputType", "random"); + // the rank (number of dimensions) of each input tensor + desc.add>("inputRanks"); + // flat list of sizes of each dimension of each input tensor + // (for a graph with a 1D and a 2D input tensor, this would be a vector of three values) + desc.add>("flatInputSizes"); + // batch sizes to test + desc.add("batchSize"); + // the number of calls to the graph to measure the runtime + desc.add("nCalls"); + + // desc.add("model_path", edm::FileInPath("MLProf/ONNXRuntimeModule/data/model.onnx")); + // desc.add>("input_names", std::vector({"my_input"})); + descriptions.addWithDefaultLabel(desc); +} + + +ONNXRuntimePlugin::ONNXRuntimePlugin(const edm::ParameterSet &iConfig, const ONNXRuntime *cache) + : inputTensorNames_(iConfig.getParameter>("inputTensorNames")), + outputTensorNames_(iConfig.getParameter>("outputTensorNames")), + outputFile_(iConfig.getParameter("outputFile")), + inputTypeStr_(iConfig.getParameter("inputType")), + inputRanks_(iConfig.getParameter>("inputRanks")), + flatInputSizes_(iConfig.getParameter>("flatInputSizes")), + batchSize_(iConfig.getParameter("batchSize")), + nCalls_(iConfig.getParameter("nCalls")), + nInputs_(inputTensorNames_.size()), + nPreCalls_(10), + rndGen_(rnd_()), + normalPdf_(0.0, 1.0) + { + // the number of input ranks must match the number of input tensors + if ((int)inputRanks_.size() != nInputs_) { + throw cms::Exception("InvalidInputRanks") << "number of input ranks must match number of input tensors"; + } + // the input must be at least 1 dimensional + for (auto rank : inputRanks_) { + if (rank < 1) { + throw cms::Exception("InvalidRank") << "only ranks above 0 are supported, got " << rank; + } + } + // the sum of ranks must match the number of flat input sizes + if (std::accumulate(inputRanks_.begin(), inputRanks_.end(), 0) != (int)flatInputSizes_.size()) { + throw cms::Exception("InvalidFlatInputSizes") + 
<< "sum of input ranks must match number of flat input sizes, got " << flatInputSizes_.size(); + } + // batch size must be positive + if (batchSize_ < 1) { + throw cms::Exception("InvalidBatchSize") << "batch sizes must be positive, got " << batchSize_; + } + + // input sizes must be positive + for (auto size : flatInputSizes_) { + if (size < 1) { + throw cms::Exception("InvalidInputSize") << "input sizes must be positive, got " << size; + } + } + // check the input type + if (inputTypeStr_ == "incremental") { + inputType_ = mlprof::InputType::Incremental; + } else if (inputTypeStr_ == "random") { + inputType_ = mlprof::InputType::Random; + } else if (inputTypeStr_ == "zeros") { + inputType_ = mlprof::InputType::Zeros; + } else { + throw cms::Exception("InvalidInputType") + << "input type must be either 'incremental', 'zeros' or 'random', got " << inputTypeStr_; + } + + // initialize the input_shapes array with inputRanks_ and flatInputSizes_ + int i = 0; + for (auto rank : inputRanks_) { + std::vector input_shape(flatInputSizes_.begin() + i, flatInputSizes_.begin() + i + rank); + input_shape.insert(input_shape.begin(), batchSize_); + input_shapes_.push_back(input_shape); + i += rank; + } + // initialize the input data arrays + // note there is only one element in the FloatArrays type (i.e. 
vector>) variable + for (int i = 0; i < nInputs_; i++) { + data_.emplace_back(flatInputSizes_[i] * batchSize_, 0); + } +} + + +std::unique_ptr ONNXRuntimePlugin::initializeGlobalCache(const edm::ParameterSet &iConfig) { + return std::make_unique(edm::FileInPath(iConfig.getParameter("graphPath")).fullPath()); +} + +void ONNXRuntimePlugin::globalEndJob(const ONNXRuntime *cache) {} + +void ONNXRuntimePlugin::analyze(const edm::Event &iEvent, const edm::EventSetup &iSetup) { + for (int i = 0; i < nInputs_; i++) { + std::vector &group_data = data_[i]; + // fill the input + for (int i = 0; i < (int)group_data.size(); i++) { + group_data[i] = inputType_ == mlprof::InputType::Incremental ? float(i) : + inputType_ == mlprof::InputType::Zeros ? float(0) : + drawNormal(); + } + } + + // run prediction and get outputs + std::vector> outputs; + + // pre calls to "warm up" + for (int r = 0; r < nPreCalls_; r++) { + outputs = globalCache()->run(inputTensorNames_, data_, input_shapes_, outputTensorNames_, batchSize_); + // std::cout << "nprerun" << r << std::endl; + } + + // actual calls to measure runtimes + std::vector runtimes; + for (int r = 0; r < nCalls_; r++) { + auto start = std::chrono::high_resolution_clock::now(); + outputs = globalCache()->run(inputTensorNames_, data_, input_shapes_, outputTensorNames_, batchSize_); + auto end = std::chrono::high_resolution_clock::now(); + std::chrono::duration runtime_in_seconds = (end - start); + // std::cout << "nrun" << r << std::endl; + // std::cout << "runtime in seconds" << runtime_in_seconds.count() << std::endl; + runtimes.push_back(runtime_in_seconds.count() * 1000); + } + + // // print the input and output data + // std::cout << "input data -> "; + // for ( const auto &input_tensor : data_ ){ + // for ( const auto &value : input_tensor ) std::cout << value << ' '; + // std::cout << std::endl; + // } + // std::cout << std::endl << "output data -> "; + // for (auto &output_tensor: outputs) { + // for ( const auto &value : 
output_tensor ) std::cout << value << ' '; + // std::cout << std::endl; + // } + // std::cout << std::endl; + + // save them + mlprof::writeRuntimes(outputFile_, batchSize_, runtimes); +} + + +DEFINE_FWK_MODULE(ONNXRuntimePlugin); diff --git a/cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py b/cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py new file mode 100644 index 0000000..1724a65 --- /dev/null +++ b/cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py @@ -0,0 +1,64 @@ +# coding: utf-8 + +import FWCore.ParameterSet.Config as cms +from FWCore.ParameterSet.VarParsing import VarParsing + +# setup minimal options +options = VarParsing("python") +options.register( + "batchSizes", + [1], + VarParsing.multiplicity.list, + VarParsing.varType.int, + "Batch sizes to be tested", +) +options.register( + "csvFile", + "results.csv", + VarParsing.multiplicity.singleton, + VarParsing.varType.string, + "The path of the csv file to save results", +) +options.parseArguments() + + +# define the process to run +process = cms.Process("MLPROF") + +# minimal configuration +process.load("FWCore.MessageService.MessageLogger_cfi") +process.MessageLogger.cerr.FwkReport.reportEvery = 1 +process.maxEvents = cms.untracked.PSet( + input=cms.untracked.int32(__N_EVENTS__), # noqa +) +process.source = cms.Source( + "PoolSource", + fileNames=cms.untracked.vstring(*__INPUT_FILES__), # noqa +) + +# process options +process.options = cms.untracked.PSet( + allowUnscheduled=cms.untracked.bool(True), + wantSummary=cms.untracked.bool(False), +) + +# setup options for multithreaded +process.options.numberOfThreads=cms.untracked.uint32(1) +process.options.numberOfStreams=cms.untracked.uint32(0) +process.options.numberOfConcurrentLuminosityBlocks=cms.untracked.uint32(1) + + +# setup MyPlugin by loading the auto-generated cfi (see MyPlugin.fillDescriptions) +process.load("MLProf.ONNXRuntimeModule.onnxRuntimePlugin_cfi") +process.onnxRuntimePlugin.graphPath = 
cms.string("__GRAPH_PATH__") +process.onnxRuntimePlugin.inputTensorNames = cms.vstring(__INPUT_TENSOR_NAMES__) # noqa +process.onnxRuntimePlugin.outputTensorNames = cms.vstring(__OUTPUT_TENSOR_NAMES__) # noqa +process.onnxRuntimePlugin.outputFile = cms.string(options.csvFile) +process.onnxRuntimePlugin.inputType = cms.string("__INPUT_TYPE__") +process.onnxRuntimePlugin.inputRanks = cms.vint32(__INPUT_RANKS__) # noqa +process.onnxRuntimePlugin.flatInputSizes = cms.vint32(__FLAT_INPUT_SIZES__) # noqa +process.onnxRuntimePlugin.batchSize = cms.int32(options.batchSizes[0]) +process.onnxRuntimePlugin.nCalls = cms.int32(__N_CALLS__) # noqa + +# define what to run in the path +process.p = cms.Path(process.onnxRuntimePlugin) diff --git a/cmssw/MLProf/RuntimeMeasurement/plugins/TFRuntime.cpp b/cmssw/MLProf/RuntimeMeasurement/plugins/TFRuntime.cpp index e21b031..e0f2425 100644 --- a/cmssw/MLProf/RuntimeMeasurement/plugins/TFRuntime.cpp +++ b/cmssw/MLProf/RuntimeMeasurement/plugins/TFRuntime.cpp @@ -110,7 +110,7 @@ TFRuntime::TFRuntime(const edm::ParameterSet& config, const tensorflow::SessionC if ((int)inputRanks_.size() != nInputs_) { throw cms::Exception("InvalidInputRanks") << "number of input ranks must match number of input tensors"; } - // input ranks below 1 and above 3 are not supported + // the input must be at least 1 dimensional for (auto rank : inputRanks_) { if (rank < 1) { throw cms::Exception("InvalidRank") << "only ranks above 0 are supported, got " << rank; @@ -127,7 +127,7 @@ TFRuntime::TFRuntime(const edm::ParameterSet& config, const tensorflow::SessionC throw cms::Exception("InvalidBatchSize") << "batch sizes must be positive, got " << batchSize; } } - // input sizes must be postitive + // input sizes must be positive for (auto size : flatInputSizes_) { if (size < 1) { throw cms::Exception("InvalidInputSize") << "input sizes must be positive, got " << size; diff --git a/examples/cnn/model.json b/examples/cnn/model.json index caadd8c..3c3e3f2 100644 --- 
a/examples/cnn/model.json +++ b/examples/cnn/model.json @@ -11,5 +11,6 @@ "name": "Identity" } ], - "network_name": "cnn" + "network_name": "cnn", + "inference_engine": "tf" } diff --git a/examples/dnn_2_inputs/model.json b/examples/dnn_2_inputs/model.json index a6a3bee..d504b07 100644 --- a/examples/dnn_2_inputs/model.json +++ b/examples/dnn_2_inputs/model.json @@ -15,5 +15,6 @@ "name": "Identity" } ], - "network_name": "dnn_2_inputs" + "network_name": "dnn_2_inputs", + "inference_engine": "tf" } diff --git a/examples/simple_dnn/model.json b/examples/simple_dnn/model.json index 2fa9f8d..d424054 100644 --- a/examples/simple_dnn/model.json +++ b/examples/simple_dnn/model.json @@ -11,5 +11,6 @@ "name": "Identity" } ], - "network_name": "dnn" + "network_name": "dnn", + "inference_engine": "tf" } diff --git a/examples/simple_dnn/model_onnx.json b/examples/simple_dnn/model_onnx.json new file mode 100644 index 0000000..38317de --- /dev/null +++ b/examples/simple_dnn/model_onnx.json @@ -0,0 +1,16 @@ +{ + "file": "simple_dnn.onnx", + "inputs": [ + { + "name": "input_0", + "shape": [784] + } + ], + "outputs": [ + { + "name": "output_0" + } + ], + "network_name": "dnn_onnx", + "inference_engine": "onnx" +} diff --git a/examples/simple_dnn/simple_dnn.onnx b/examples/simple_dnn/simple_dnn.onnx new file mode 100644 index 0000000000000000000000000000000000000000..6924f32a72afc8dc2f3a18f65bac38390079d9d2 GIT binary patch literal 103448 zcmb@tc{G;M`#x%(hccH^NP{U!k@vYbWiA?&RFs4yN`nTCq>yU{-#NcO&RM^;zMt>;t@W&TJ^$=|@4eq^-}~Ozbv<&jYO)c_EyGu@ z-aJ%!g878W<`yF-1qNDMPquwpDx+{#TtPNu^_mS4eijP$b0hpCf|hRxo9(|YA|xUt ze05NugMV1q`1L_+Hw3MY2=Naa9~iWHeb9Ih{|Ju_VdFi6`~&CvuM6>C78W#Xjje^+ zg#WMyTZ^IN|HCE*GXEpELXWt@e;!U@>VG-hv=IOGQv(D4zmv55PrDeHrNw{R#z1l) zwnCn`!ha*0+JAjbJ%hqF3=GfgzdXGEvw?xs|M{!%LR{fL4W!`kUkDVwA>#kS(Wdc;PyctI`Ab@z_OF6Q<5B*K-hEbd6;u zmv2&HR3f{tGMX0EHL&Xnscc)&L!6wVC#rI8WpfYh6Im>%g|op6Sz6O}X8qWWe0L_Y z^>#(no0JXBPH{|JWdW@|Ys6+oXOqO(pR7W8F7r*ECfq-nXGi#6=r*<_^Ap#}e8CKn zqQ11KCHp$5*hJEh~}K# 
z%UX=4i>h)B2h0dCmnAE5C|+6|-3Ew##h(l#L>p2RnI_b5ljHFWHDZ zRxTDfc)uX;v|19gehA(3MzQQO`fR^^9~%a# zc$y1ox`m|h%fT*vZ4=x5a-zsyw~oe5N)?5SO{Bt`=Asi?S|a~BULqGiCQA2Jv=h~T zBPG4VOeMdOO?f(Aw6H~2w7SKaUC|#bdaVBy97j5fqHkPa8aw8Rz8RL{jZ>3FPBypL zAiHBzs`a`^deK)Ht#D6JpEQ`|haMD7*lNwb$H$3e)P{?8$&3_TDotYAT}>?F?if+o zw~Jtvy&E4Ze}XRik2LMxAGUOgifFx~nrNoTmF&&dv4$ssEN#{dW|$el)Yh&Lg`BZt z8!o$x&P0s^ZrBUDq5A_@(q=M>#IwVPBWO5pkh zX_57)GD6`F_QTppG-a^5Xn58o@Jonf2TBgZBR|HZ-FLHhFQeG3%coi0BWb(TyVvPV zy(9afyc`=0vN@B$=j@8bW3qSZryC0GWVPuh#W`N4_=-!g<@i6A`KF#Fh3uo7F5f_3 zv6~r3{9oDhf6KWveeBKJCh&@Gr7{^g_A0-W?QAX~h>m7bN1WIcsb4V8JB|$2sELeQ z(`4iZo{4UIo+#yJ&)<#jbvhwO$Bi8iwZ~cV1~@5bA6CAP;b*DMVTrm{ zB$as_3M5>~-`|SguhuibtK6$GBcv17Y<hvjs1)kR~0ecqe!^PJO@24T7!eJBwaidO@{6Q3UfWn)pjVcEyFa}mBs1s z9Ms_7GHY}TF9cV^{do4h27P+I8SG1^(vQHE$d@Klx2h#wa-9RRk*8_wQFGW}B9C7d zMb+Oo*aXtmX|!YP9TJaMW=2*gaP+PD6zx`xlEHBVk_oWsUp%*U)h)jAm5|Tm z1f=t0sLyl~7%Vx*Z>!sjsY}#ZXWwd)`*xfvlIJk7clI>C=_;O5-3=!;G~vmzI2Lea zDO)A6gb7JmWYx8TM!wr2j5Oz&demG9rvz}|SCK`rCF#xI$&FW!WHL7GymjIsy1H!x zdCb-j)lVEG`cb~0JJ_|0(pz@ZsJ&v$(Dy!F(k;ZHB6oH+Q$P(fj?lE6VCHfv8I5`k zNyR*cnKfSnoAp|tylEx7CZ5Y?tsWsd`PrNXZ5hY+m)~O#_3ft|iGR#|>-9qVgvn2%4Mej^7A&>+x22@FeTzi1Qh;q8YuUMPJUx-bN*7M{$~=? zDEy9F#17&EjWP&K+lhxJ?j`fmbh5ZAfwy+c@il4l>B5d2Y_N8qs}ckMmu@ZNbCc=x z183C#(ThWR6hs0QDW=fbi@W?zGgH$l7Bu7o8G2rTd1)I!QuZ?}5X7;ohPUZlvbt!U zvLo}W55--f=>R2>ETq|&mAek0F2#!_g_gkYJAOQ6TlaV$Kt9rYU$ac7-? 
znu@Nn3X1~vY2*m{^6(Bbk@<}ij(F0L`%Y}b!CKBt?IM$Uw;7FE-_i6nzN|1vk8UeB zGER9B3%`>?!ttH>fs$c7j-@9mo}6T!E$cgP$sAvQ$BdRi(0ckiH$No-H=BJ#qj@!O z?SmPcIwOn8zAl8i=2$SAs{&h!7+!B;tb4l#e9$@sX=RzLt|AF$MBc@zp#{A7l)1EV ziXq8ueg$d2hOyCmCD}%|6zENtqh|rP_yvZ~_#j~m>RQV~n#TdKZFQi^g4HZc(Gbq( z1~ZF4SNWr}4cM*AxtLy+#l%Mqhp6%t{Gukw@&<=7i-RHTA6GA2m|X=+!aZzc^WvDC zK|jjGJ%F~L*|h0s8vQ!x$2RTrqM4osl>7MrwAxlMMGY@zyuTW@-Ij*{*YD^QASv>< ztwP_f5UM#~$pU&dLrEhtF%xYf>kVv%j2u>3__OzUk$8Mz493j;2g$42;9${3wrPJV zWLNis`}Za|xnvDfEb2s)Iqqz>?IiLTf0(XcKhL_}X49CAGt?m640o3Z+2M7Pc42AL znAtgNOcB>7?fh4Ct|y94Y}i6K1SQbtn*%|*)@)(Me#}_-}3tzQ?xy_Tr z!bt?d6^~$Hxf1(uFqkc07zp#$1as@!@3Y}qrMS4o03L%c-5upipW0pEL1+|y-FFWA z7b(N&x-2%ta|l}_=E#=YBw_y{Ct4=5r^YK?_+nT)KAd)nU4Np-%FLa>Zs!5qb6y?P z?wEm}mJ_oYG#dVl_r|tHE$RrdWAYOEZ1|gJ_~=b1lQdq1<$4ydX|5v^-nz>x1jI3& zP!-zJE<+AW@|mCBG3<#*V0t>btc{t`@e{qmki3h4bAtI|d23j3)qn{yBG|SuDR}5r z3MB7qiHU4O9U)G_7J42=Qq4ghayDZyZDkF1eL9Y%o5Oi&XA3;rbsEpB zPGy?EAL13Yr^2x#Qy4flklU?(Fg~wFAq(Qc^r9k5Oxj5b8|~S~_EOGn)^E;Z{`DPDXx~M3cXeqp)iaVTnUDp#5mz;iOU!*Dak4gUFno? 
z8{BRFg@)WTHon({S||8{*_ZL6t_%0s91jQP@Y;pH+z~f$EnJJL%VkKiXc`LMuVxv4 zj6_DV33Q;hNw{xTIx~O0mySzq!5t18NFr<>Zo8ezE(hAsf@ezXP2DWEcI8wG|1^gx zr03GyXQ}w;z;qmLn@wB&YxqN9S84pL-E8IjFf2Rc!CgE*S+rGC&Q9pKgYnsW1#SFt zvfsTAlHL!Yf^D;y`Zp7H;f1#7!l4|x+i-=ey#JiNv@7RknUuoJA!VeuTm=1Z?P2Na zE?oB4n+0Xfr^*eTf}5qKTRsOS=Z)KaB75>$o73Q72PX@;6Dj$f9plq zHa!5ITvoxO&p*Kf(}k=$rXD|^Nd(zKOHy|Cf$s+zX?^K_`u1Tb8E$%muUAKr;kUUg zLM4@Q?Br>i&Ng;SSr%RnPz{61jzZznu`E14n$FMCWs-UQ{QKB27V`O_@Q=YDe3-YM z62I0%ZE%I)Ql$+n7qmfu&Pc4Bsl#I0oM{r3`fN^2-Ib$mcauD9+W<`yn8}Uc6 zv@ne1LO1YoUZe5m`dcv5Qr{CqqWKA~sxcB$df2@c4lq7kd3C?ugjS=WXibWlr4X-X}*<>0U`v&`jnne#+BY z*$mjJq{eAGIAHVcZ0=%BB3hmC!+ZNfut`RS`CQbXn9;RDk)J2s{guea4%7uL3XOPT z;bu5FN)BddT)?HB@_5~J4|_D|Hb{Gi;7XH|=$W?~TSvy=iH>*ZDZUo;$8ACPY4S|= zn<2fKJ(y;#GH2Z6HXLvN7QY--r{D!GoTuwcSkhR6$}JHnxxNC$o|rLvTL;o;nF#sk zrqIa%Pr>Z&$!rxg z)|YTwp6!OmXLI@d{E0Zb^Ce2CJqFGr75gPl^QR^?V4Ua`EO`1?{;*4T`|Ndv(MLS74YuUnyjWFP=*Xq#s1(Mv-6k8~LuoVvlsIs;ECEjfv zp;Y~65Xt$`jODVkdmT3`={VfnA&o^*V{z}~n|$kr z*PPd6bM~&~32YHR4Gvw`FiiX*f9>5fjCpIqmbra{-?I|Y@R>Q;YfJIdx+%^6-B_VC?% zhr;e}I@qvQk(Q|Hkjk21ft8>hlXqT$=aM5?cZE0?<|E5C$PZ#WB06DykOfmxPeaeU zXRsyj4!$$26$JgZAnu0>*K{stpbn`+ne_{C-Odph9F&Ao;LTd=5~*vhD~=zVMA9EW zL(ataSfO-~J6JDGo6AN(5$7+Mzi2z&S}4QM$QFlT-lhEUAsX20G;khknc*M1=K{CA z-n44lKJICI3fn0WNP(`J^rhJsr7rfMY|9&ru0BS+F{yY0PT)Rk1=KOF6l$+Pt#b=WaMzp~0xE4My->uC_X*(sKAQ8}@6WVH z{zi?rT~O}x0t(e^De&zWZd|B5hU@y0dvYvA{A09l-XUSm$1M73nh1N|-{fD_h0fNFT7A4*9>uwjn#7i#ZAVq*<@D&)NWOQ$XX?q?O=6Qipvt9STrkst z&Ky&q8AXqA%+5)qy-uB$9Y<7qb%CsH61gTXF*3AwCNGOXmM2&(*xhv&Txa$}v)DE) zT~tShuVvC4wh7hKMWCC$lP_p863iVuie;pq!jk%_XkcN^WK0xk@+Nm;p7NZ*=*zVJ zO(7-KZKBYL3nAHXEF~q^<8;NbG}R!M7PcJWDs^?RXNw~_p4VirWQI{PQ^aFEJ}h$1 z9h_Nk9Y&S?!M}Q@^zQx#6y49G$iJFcKTMWReQCzJ3t8}GiwipyF@^S&+S7}yNz5Sq zF!j#WW|TLP%nq!eQ%k()p8PWMiOPIpaXi*8q6cnDBC+n( ztYdZ`iVS)oaa$dKB*lQ@uP@|{vT_ju1L*bNw|v7hfBGAj3&V$+QK$AO%sX+ApQi1P z(C`9Pwp3%MnLp>^GmP#IQ6%B;R+KDBpkX+eG@fM(-iED$VG513%f^ly5kH(oZ}@~0 zhb9Q;mB!QKdyO<^jVya7@Ilj%PvHHUW3PSJpfG$CZl9mUR=FO4l~Zox2CXIhDgXUY 
zx}ufi-%rJwqs&Z1k4U#);ZJB^7lP{YVtrGm^gxsnKGA+ zPP~Y!cchrzv@d*UW+u{vh3tsTJBV^m=E7WfUaqIoj&LlT}|KN!*!r*7rfx;_vWco+Z1tON_;ansT1oKY-oEGOlS)H9!2ix^4gb z73{|3we>jc5F}i_fb(Ul@r6Vd=2@5HU9CUd#I8M@&*M<2dT|+cj-3eNzv^LCybM!6 zdYqdyC=|ctcX4BWj$ye5*SXh`;jmP=j9X=K4_~KSu?P4Vx+or^Z=M5qzl{yLn+}h> zUDy+q3xW+JrtyxEEj+nYanb?}<~OH+jS5Tx=c}5mb6Xp?$*KSk4-e#H+QNiumcM~# z`)j~p*Ep~;*a=|Z$DCj<3S?w;RL!r{Kn<= zPXw9k<6zNcG4^rtUeGo^4Nr}M?e^Qi$~2RhV(Jl2+CiR6ktzf4I5AS*wU8ZCy9uqC zZcKTyGt|FW1$PXWW9^|%Uifz{2AVDghgt0~k@CT=rUz9A&ST?cj|X^AN2Y0a7&BxJ zL4l|je>BcOWs3ti+;an5)KFrdPbgBBojeo2V#fByJHwiQ8uod$DKn_K0!2GNqvk_@ zynMxo+3l6Hl{0l@a)yB{Vs)&bvb&uNU1h@cZtvqN-3!6{hKf+;=}yj0>oo>N9OM@r zNMl~ghr#x4%>b`-7!B1+4E6Mk)?TPx~V$IT~j{Cgj{MH#Z3z)5iaQ#(01 zc(c33`bZTvpC3bW zNu5CLSuK3Ze?$Qa7rCoRcfk7bF8s`-+R0H*iKf}5<8L~D|D*b1L5{DDF3usP*7?3`SU z=^LM8<$@gU_%B0t*>@~czah>%?=E5c1X3d1NincyvOOHwJeGFv_=qE}&4A=6GsIOcKn z@oVOq;O8R%Pk#0DAJ@9_I(&Pj##p_xw_14>k!taw1b@de5v#M3l#Lk zQ_4YUl6y74IkOn5NEt~J=NQBF-NnG)UPy~aIilALAKrOMEn2p$X8Wt>kY{tTaJ;<} zTG^{p(Fb|f5+_fqEbl|BfFL%+6sl6k(TFTp?%Vu-U>)FjLNN`zf;Cp-9HF-NATe0iOFh2?ZKqTyWP8s5zR$SNca`^)(wd%way4 z8tBWvvaz9cHm~`Qqb%ui@@q6X;|3K2GEPBd4|Hnj(EZd>?zzK4T&Z{vQUoH~p59}e8VK(sUfiW`^c5-<&iYz{522P*X&dt;; zJqvdOn@9mujyXVuYuAwJgA`mPn*(lv5vZ|#8HAr5#U@-ALkDLiR=(dGM{J8>O^YU= zhTV0%7;8Z^M4cUeUxR{sQ%LY|80McV;r=xWnD=H^@@&5dFZG7Ag+<39^H&?bdHw}- zKCGg9S-r5}wl=NV=s+EULzuoyjLrKeMWb7U5E)xQheP5yo8W=Fo0=3o?LQA;Zs+iK z@<{fpQ5;Qc$1}N4zxc8HcXNMRHq*|gGvMkh!E3thN7<`6+^|xB#j@ z`?efG!16@+JL!jQ>Hr@gr;x=B9qY&x%X_&+V`uY?(_&cbJ#m_5C{0Iob@)&D z;~>t#h=Sy<3(lF$p!B5S@X)#mM)hdWHmRMqcgdBG4b{dsS2nV5rkQy3LOGXUE=4DO z+iV0Qq^Uy352fC?a0&-j;rFUkzVY#JdhVW$L1IAzmI+DDeu^>cZfu5>6E!GRu!j$n zT93sy4ujEC1`7u7#g%hva7|?as^VnfpY38a*0vRwB)-5l|8eA;`Gk*4eu(evk_DD7 zQQ$gn1c^Vp30FcbnD%!`%JcaN<`#p&LuZgmJDuE%W#(8_mw|#>oj)WsKgT|3^D&%JHO>*81&8^%76P{O0T04gP!bz zah<-cRlf^u+i7iR~8Ov&<}GEDw7l(of)!SdCKu*B&vM%z;Q6$Aq?$|Ib1L9vE99}w zMo*}w@(Zh1HuDt=gUB)RC_n3v6oo9U;v4+(_?s(Ep+{ppNw$qafxj(#rf5gH9}JlN zUmeOD91V3Zs_09TIW2Asxp8JF-fk_n`{(wVVa@k+<-Y 
zW3=F!gEhPPup6hhMF@vplqPYj_c+(Rg5KL|Q1ibgE;6^8?oZy#i}fW7)hs0`M0z+$ zEnP$*8-;Xcxeg_0oW}(#F*enh_s_*)=`T{A>a=8 zFHM?FJUy1{DMr@tZopEsu8+@c?tCKSN-3gCZYQ<1k>o7={O?{XnJP`tY}XBmL|ePBJfw@pkNFP#4Rlldt;mv9#ip z75BVJA@c%G_8&n{Uh2R#aXDZ!2C+>KPO>*%Y4o|&Q{d<`nYsPg#@#-$22D2%rc&=p zTE2J@h;{x$YyC7RG@c5-$_K%i(n0k3#X}h4BuURaU!q%oCq5FMODC&VY6oLxFkz%7etmgP5chpRv(X#E&j?zDqwX~d4Yr-)&ODQYW5!N=um5mv zid{a;)K3?5X{u8~a~VJGQ8ARKdeH=j15-vVAIEwem-FI>Lb!PvgD79+Fs_|85B`Nu zCq8f*FKMKXkF4{7>V81+T`8Qp2GGLY0Z^nxr>&-dj;adWpPL8%kt4u4?k?=!E5Zw9 zHC(hz8>|fv#VlxrowCn_F@3XnUEQISIgr$ za|tzlaxy37nbu0Lr&6W}{^FQO@)`x1x42jI%Wy;PFkE z^1xl;kn%>>rRT(o_pD-t(`<0g`C4!mSH<|oy|i$L8EoiJW5>F^q0nQfK>t`2DV#fo z`d{~ggT@kEdp8zB>$7ZaM2k6L0%J$y0_pv`RJi88h)HXy!*Yeen5g#~Y?K~>x?dLV z;R}FDUVxPmz@3LjfhC^6PhZ@a^<5t}ebha8YAVgNub;#Ff&!Ebn@>?YQ&8o=bf)(4 zqOf;TD)(;YK{N#on5&coeWC|Y7#9quzo}7z|0oL5j|0^!O0=tOI&VF@i9dIFEna$d z9DhtN<{qmx^7$i@@qJbwclgkNh3fNmc-$pV5*fYR=OqnOt~u=BUtih{WpCa>)mdda zCY{eooS8+=-{WCu*8%u0e+FkJJM)giL|~VFl1rbnk8i&dgcpLoUJa*$8pZkKJm-%X=)#LQa}a!S!sFUTm^V6#x4UD7=6&z@f}f?_ zvV<@2RoV~|+zRo*y1{(l>M^%5$x+&gVs+Rh39Hppvg#T zfFqs((-(@N_lg`Cz9E^1wd6@&^t^^&{~3fjM{}Sl>-M?V&0YM9 zhkNVm+P|V8=^=mcSP{4E*ArV8mso6ezlhg59&(SixuUGpWd2Pk_>LF#m-r?eyTb#QL^)B~iwS(q{18rO zjuAbZwhW>U%E5)17oq*>PH&h$O!*F8O?~N=3TaLm^*yr(Fzo^Jyhv(IQ+ zENe{m4G$=BQw&5actD)x6RxkijK5V<1NpP%AkQM76#c|ePIe}zzWpz^SgnEY*bznR zKA*r}DJgLDNg(WyP2x_;>fno6S-f)QRX##@GTUb1O=pBYtSx*Lvwd8EXLrnFou&UE z*Gm9e^Zg-f(|x$tzf2%65rpY(K!am!aLBqMF!7LrwEa)C_sjj{CAk3dF8huuvb z4ezBT&}h(Qd|Eu7^t^a3y{rcJ&PgP#9cr{rO$aANQFLaD92op6ApLLVZ0MU9lE3;6 zng$!Q&=)mucSr+Vm_FbvGx@|1c5URJ#xcXS@LZ(| z>-QLrE1vm-;@0(S{Xo9|G)tK{iHopbe1Ib<^JPlUKH@U3<&>az1pj&Y;F6`W@vdH3LX4|;&5)`a23=FFk%n)TEH0^#s1y2#x0*`Gs8R6X!Q9YZch3g z{(kB_*f9SNc02@_kaw8BHZ7eyETzTv_zVHB##ba29QELlw~ zLUCaY%T372e_r7;ihA+@~ZP8@WlS}P}X3y>8~wbF#R{3%rOd?Q^^-jC|* z`tkQW1s1F=!}3R}kaoQ)qzO;Ml+Si_tuu%d%h_j}@LHF)t{+U-HClK%MPRQTo$-_R zJhEOgg882k$CaU$cy---8`}~CN@+|4kMspNNBbkD>|Y9Nrr1*5;$-}8>w`|dZG3HF z2s^L+7Bsd>k|GsFn>U1Adf5)$jRpAmX#jS3+d%adDH`VN 
zg{!qR@xe1I_NPRSPUfFOr)$5Ud7(7ih);#|_rCCT%Xp@wHsHCHiU#}buxxQOl{t-PTQnYk`@dc=^}m6R;yYk^Ngc;-^+EZFk1%WAAbzva3f!1F z6vHpY@kYhw_~$`1J#%)ZDee(yes2nNUH7GoLTT(=P|RFbjs)$P&2Z|`JO0(_^@8yW zTi{&wL7YB(I1K*&0axBqr(;U;^hm{lqNat=PGc|JonFs(`Xg#{mvkcQ_~SUT5=bWdk;u2&zz+D-j%Cm@F3F#0Uc*t&~XPd{IO z^6vs{6dMDX4;pdfb|-jqmEl7#9#;L5C)1^F?4(6Dx?GjwKPcaa@0Y9SjIlW#dKriB z{4IpRy3*MG?ijtk)I}`;^1N8=51j6sg-$&a>G{uOtdf$!#n;bb)T4QruykO5O>E^C ztD4#V8a9sRHJf7H=r?@Ml`Rn5_Y7PA%8>7xX=s@T=Y;BK>Q5P%(6L%?WNz&7Scd63WP;Hzjv}4*lYfO5mwa?Mr4+Y-=VKN6I_4_m97y8l zmPS$Vfid(xvyU?fGpB+m4?*vhRkY3AhtAub733HmhfY;*dRtqGJD=^qBB|YE)ln|^ zI`ItV2TUjhdE3DHgL(@!=AZt{(&^%LcUI zn|oMT^hj>m@B`rHKj6X6Z{zcZE@kIir_##A*Szv6U_ZKtK=1HbWEfqz4U6RGh=2dG@$NMFMKa=v2Cuqj6$W;#2vvPTIx_qi1X z%1*&2)frrP{vKPlaW}^6u3)Q{FA@wM^Z-MrhQi|X9I#nw$8O$f;#5_g(8l=!e2H|x z{ugi2*kUMRQ5v*&yU@P}asz%U3y_@J#TzVg!kZUN>Cr4k36~572WDMD=j$z?S=1`9 z6((Y^U?|?(dYT`##{-g@O8L6WkvMYYVPr1F+)Slle)Nbv+{OXx_WL|>c)C#r4;q+~ zlez#7bWR1|6nT;r?*_L8>)~gr9IIBUhM?YJ-1DLkW)FDjGG&f|Io|`1eA01PgaS@6 z%CUX8^gf!rSLfY!I->i)7cRyMlKAO(DwzBp4GVw%@9H z;&vmz=KfTcZ1)&jALMZBU6^(|#kHpY6U)XryB0erkL-C9vZpnr-5H{47 z8+3aFH^KS_7kEJyhNLMe6Zy6Y_n0i^2b)wu+pop^4HYdMbV0;h_*~$F z5)TN?RzAXs_T?aHO;CM72fu}V5~kL)ft#2bv%j~H)9l>MjW!MDPh`L2CEw2DGi6lS z{_CgU+p;~7AMXPnFP`R>IS!(t;9GcP2jYVDr%)x~1N<~{haF2NKzHau+9Dz3v`63K zp0-NT$jPImF9&nW#<7Q*K5VQ?Z2h+h zX1Jo&fLV;Kg2^kw(Pa4}*n9sA7j(G~l3SFq!2OJ6XivJe!Y7(Ix!ilGA8&x)P2X6$+lriiO!b5Kj3AbEgB` zxhp2Re0c9EP_ui*m2djYU)hn0w!>@%f1983F(Im~Tl$|ccCI*^x2eU(Q|BYLn2)0h z`A0Z-Q#Ouy6#zT_^!tbKTm$l#b`gmRpRxiTKgR` zI<)wo{~9=z?-E#i zpI+l~?h~-}b1?3{4zAh}3J094QDk}+9>0Ii*JNL_tv|5|wi_-+a-F~zuYJp}OP9nv zod=k3WjI=259bYpJf~l`mRFj7hqJsNg-;)>LuuWqP;|TjETc*|!{3?Q!M|eYG5Q*B zf4_@6Qe#FV=iNe=r&obcfmpb^msK9LICw56&{=}VU2?&F7iu+)40 z>%UIaUf+#Ft)_92=XB5?OZn(u1(+tj7{>3Gqhm25E+AtLx7M{2ZG5F*=KZA@K})&y zePOtCb1Y8iMSkV?M0oSI6)iW{;hK`=;8WrVOOLLBr%v_Ua_eHDU{emXl^L>ZEk6va zmV&Q(yx`T;cK);x$N4;ajVU=>ak^hTYPy=kXiI7E5_a>JpXWom-*j%>c}0}#Ye!dS zDSAHHj`~+U7wqy0BV#oYY$+Xt)$=X6n87wUbB6-nT4l>B?(9PGC>_=>83)_ejpoz? 
ztZ1ZGHP(EaIiT0{2yRZ$LJOq}+}S^;QF8NDd}UYz@d2)AsI?o8?sZ0PAA`oWhkW49 zzqmqIntfhySrC$(fPGnaal(io>LVlmMr0BNrRcNI?ZLvC%B$e$QUkhE{)jh3-dF?&P-l@IG=Ce5_IcA72R=)AJBRUi#p)5pVI$nBTZX zW!IAS`bbjR>_ginukm3! zEFiHufu==0!3pN;xf74yqSK>6@an!K9=hhjT5AN5Ym@~Cf*n~qIC2T1=Wuo8KIrMw zV^zUMG(T|w>#xJ8DSSPAtH|abl%(>(nUC?TOCdb8HR0Zxr_+`{^5|xL6gNkYW`QO% zVN@yp%&Zf$%Rc;gQdueSMaX*dKuxOwMp>&s#xH zT%7bIRgpZDl<&QSSLH%3@k``OR^;gc}ju;;1?53`#E2jh4%4Lsf z@|}aw9~(p;UyC!9sdCKyX`!w240mXGq{JqA4Q2OUSYnal2Iy(4AdhWQf^kElAzW`J zDWs1e`@0{79WJu$K!6^t6OV*dv$wOnzIu4GA&wM{jQPBQn(&xo1iR!if+k!mqLzh| zxqbOcT=~C4lpOU8c1bi)(E4l^8CU>|6Q$X*W6f|Y!v#*P{0B0g&MZlXBKv+oRL9gB6zKrsUC1o%^B{XXM122{iW4jtNxRmosB<=A4*8Ew@x?W|$ds7(} za&H&+{`P?9cD4~0uX`5d?9Xv;!=Iu4)-v8A@2Mc(QifH&I7}6Tbt%Ve7c=s`gX?ys zf>D$jPPkS9IU~RDkKJmqZA=J%RP+rsH$TOC<71c+{1x}h1yOc*Hbb%K>fK$r>+9_rS(4o)i@~n!QeI0PV8?eCG;AKU0VT zd+>&C9CzZ`WpG(?fYTh%=e)1q!brXUgQD~B=j!|8xEYe{3YitkR)o)eA1w`}(om_i ziK6XW$foSFWrdIiWh9^TKBPivYN=GTOSGg?_}$-s;Nx-c`@YX>JP)54{?dor3ujG66w71q9ZDz7<95uXilz-(4Pid-^A#a+WKt=_0~xz$$qH-MJ@>BANo zcdDd)8Pi>akQ^q*avwX8(Rt&@!2D_CjoEHqn-Q{GwY}CvyN)4sKQMCSsdIQ zi^SI-Y~DMwxI^)R!fRrv-d)5AvSsi@!43TR#g>J5+!Us6jA7RtV(45?Po^;D38ol| z!snAy$WPgO)KPmO`@PSXbp4KFV`vGt(#{_?PCUW+Z<&Cb>l{Hd!QAu4l?}vU5Te`Zgkn5Ot2;0;~kqS*es@xd?BOQuyXw@zLOgjav zpXShx)J<^4F%6c86yW=l;!OT%IZWSi6-+)57>;#j&osVqMt5UiLbnO-zVe9A0zAjh zt7X_&WxjXhwTFFqEQ1rTjpve|NCCGr9%}~EaQA}=+`)>IobIV8Dlt%i{y9qI`&wC) zu<^u^89UK5SOx9|RA9z#1zi8TiCg>0fF);)WBFpD?1--_)jwVUSIy-Zs=q>?7oHF! 
zRfj514MFO+1s9yXk7>@cgk;8WVW2Zhl3fJGn{NrsLv-QY0yDN&`wAF(jRfCHY2h5+ zM-}Pu#ILIq$iV;#uDLE4m(+lYV{U>+$!PZG;&ncc!?;uNHf(SELG*7P&!YS_+1q=n zurg1JjXuIK{rMbN98ig2oBaf{hoeY?oeY~M+zI4R72LmP$8Lr?Q@h1ez;R(b*Kj4B ze&;(Ia4;LPJ$67t$2A_Au^N8sOVTM(kz7Y-9_|R*h9`pd!q^@Ep;5OpNY=GNSCc+w zJBLHk9dYKhPK()RUq{`=Z%|5Cj82W^J8_3*fcDm}!lhz=V7{$4_9$+Hte;Zkhk+@x zT5umN4?IK9z?&Eua1bWOy%#)~CJXP6l)>BF6Hx2y1Uva}U0awuiLFeeXLU@_E$NnU zWzKadJEzUYi5-JlKGWp7P^opB;h;2MP z_%?+dR_%qNo%OJFWenCV&cxW=@3CmpNt7!Y;%*F!vW3sD^4g#>{hsD#rxfFg7xGky zmD@9Dxo?OT^8jzDhC{2eA-8eFcVzoVkhUG?_+II2SR?JoHtdpN=6)vZ&C)MuZn2cv zj#ovIo?19F^0UC}(^M|RgtAzbR-j7vsCm#eG$3mHJQZgpQ~#n$wJ)b($Y(5nY{vrq zDDGI+B05EFDLj=>CK}RX!7b{m@RN-i6Y6gi#7~dF-Ze9^Z<9WRzbxY1`IbbWeZSDT zs2{U!Cord;0PH{Bf+3?W!GX+NuKT(aBz@==KHAfRHc|5A%9wmuX_N@-dMCnxJ=3A? zX*Er7(IwM=ah%b&Jdg_*C;Yrh2V?IXuQPR!Cv}giK+5hgW*&A%`w3b&Wpo(b9{h+J zeT(6=cY9({wiwCMe@$)Ox^VuyJE-YhjIZlLVfal1^!H!J1u-G?^t)vEso)6tzLz25 z+$z*n7^dY9A7W{TieQz8EbB>7g~@@&u$|Y^UOoLyT_f!=bA=2yqdAf4w{OE=2@5!V z_jDwUS=_bbt=up60@HH=EWb*j*89&0QLUJtQ-u3P?g?8KXVQmGhFpEqThLGVL-+8y zmrkrTEVVFTHyVZ^$6%YV#cd=V5fT9x^qg^H-d>mzr^7l2_1PtBC9o9#Zs)o<2SWbX z!ncqHp_XK#z@}<7$W@B6(wQe|?J-e&I;I(YBroHm@(TEV@UZYg^IH@tiiWCIEjXW1 zLhnvP5R3}O;mOXZGye~rKEIn;kdV79>Sxh0nWXNk{!5^;*=FX~r102vj>YJE;O!nLI*V8=#H z48~kRW94U#Tep9W?%YcnnZ^=YaFF>6CS7R z0TcdRKil#VRjY?-Laq-e-cQDkmf38ri5UCy3V@sZ8Z~!b#5`|1NUKT0heNcj*Vg8%EzI`MMGx4xE^A8&*%Q9u~b{57CgK+(0@Db!JE_xELDO}vRGXa z6wN(`P4462$-mJo@OL!!bc&(zX>H6I`4jJjhzKp;Yf)9>$t=g{AW=0xkJC)-i4C7m zN^E-rui9>avV9V~be#(y29|S^#D7AJ%L`PB8v*4t$GIs5|KQn=@ep&<3W(b`nr;>X zqnC~#`u;KOuIzf4cm6Cr5PJ&x2SsYf-7sM}Ek&%#Ee&@WSYX{bE%MN>6i0@&(BXC+ zqCWL71ixR!24&L7lKe6h@4i z$M$`e$G=+BgcDYn!&I>xrl1{#^DYFDwComW%?jmpf`y#y*h`r3@i=rwTY-GpHn!dN zFa0u54nH*fnf@OJG6s~XT-i(-YcfIpz(}&HPMUDqkKo)KEt0i<1Nr1+M$UZChEJKB zAt?9+TlaJ}iS3re_?ih+##WCOX`5jOOCi$hIx)@N5~b&FAbNFvxMhwpQ#Lll#%-&Z zVcujYoR^5DhA~)ut&{suERXt1#%#^CIQZ0{%YvufB!3TQK_1^*30L766dEKiQ7gc}9eS^?h(XzK%HUGh?o)&de)kBln;>84Jb6 zGWRbDwD!X^T&OKa>g=bHy@h|c$@5zqT?-J|{l-~AN@Vx;3i80N9)k1q 
z$+F8`C>nPfM}N-7$6r$Dy3GMlm8r+1`2JjykGa<;VSjQf zMhFzy>q=LAb9R7}vr0y#m49IGxjs(p*j-Niavbehoxmj3|8j4ej7gtiKiEy*0&e0{ z;Hr%{iz*Pum44;;s3nhkHnTx6V}w8I9p`xh+b5CUcdzkYD~|uu>!eYY+41LcXi~%<-?lFz+&#)jEi-lh44@hApUf_&AL2w8HmZ zqGZLS>Cm1u2>stbLfYOK)@zf<)=j?%nNGIQx2_q-D!H&qsYdMicLWm7DKN=3@}%#h zD|~kvMQ$}|2{u>Hr0%?a+IF!PlJ(_SD&N;jKGlMEMkukgjsVb{V-4HV&9V7I7QRc@ zAU;n=Gri$z8f~kLUTsPwaVQIxT)P8JBcniNdJ4}FnavjNisSaInuHstQJfr5gZHkl zWqT%+fV8{;Du1g6*;VO~`!E8IXqUl^CzFZz+vT|aeGM#jz0ZF;4?=lJGp0W_0^@?p zy5;A3>e^n6gZCHHuy)P|oZWZ<3)My0V&O%6FRH=r-3rB3AD2U}a}4}x@@DPL>daAP z5Zad3;8SCJ!i^uv7QiLkk@p6V=XQJIFZ9o$)3o|$1E6&V?yL{tp1xa% z&&|-{d9MRo8jJq^bY7uTnBhedq}sm%a?IgVX5cf$W zg-rvIJK=cnUJXVaixJMulVQuh@jFkH4*PvpQE*{(2y`W>V|SSkJ0w33%-n*hOz;~{ zW%d^^j*w@L0kV)XF@qB(EN4n*66zL3Zoys!2|DA>13KPa6gu_XVd||GJ0tHFdTHeX zls&ZxM&Ie;Ud8bJp12A4`ey<+Zt4U4J;@Z0?U00TVn6VN(sUdiUyjD3FW{(zR$)iD zJxaf=PuIn>l)DOgF#vWj=H zu+L12n0qRrt{w_XcwhcViX-@M{!h5r#If#+=uh;Vf18VM$)O+l9)Xv$7q}dr%$^@+ zC~|15FgK8Ac-;SlM^g;&@#&M?zCaZaxvz-x4(MRiI4RP-C>u?dainJ5i8@y|b!KU* zhl@={vHla9u(CTD!zNdwm)uY8z)N|YeNLN9tv<`0HqVAfB}eFyMT|bKGr()VW<> zW}2q*4Ug^%g>CjPxSty|1-16NnBroIdTUi!(IyLaHMtshp3DWOp)6dQ?h3`x$Kcp_ zO*TfZiz`ePgWeizw)oKmyc=PKM~@6(t@tc3%Fx4|@pB<-$w92t9u_v|hEN$k-(}tW z9CQ@*Va29V?3kt!SR~ws;_DCa&FeT=a?BD(M{WfX@uS#fYmPBnUeRoWQEYFH2Q!|E zut<+V{{b^xj|X{QX&!so?FiEpdZFyUY{6YV1G*$$guM(}$j;{<=A5HSY;zmK!6;<` zw|`C&#ly+mj4iu4C5va^U29LJju)XBR6_463XY@G(Qo@i^6j`Xd%}BDGRNoP>BsK4 ze#IbeTNH=>_fFs!)g~zSw&Bv8R50OU7Pv@=vAEgdRH3X#=ry&K7A>5QTW%L3_tzHB zB-LVn<|(kOkS0-lSNq}^9r!Xc9oIC~f>K~7UGGtW#oDd7_d*nW7}5g2MNw!=)aby? 
z&3JSs|9+UO;XjLOobOwCW^N#g-#Z@DXfJvG_phYcJz@~6r%rY~5JQm}>SQPQaX}Vx zv}kBOK9xUi=WX?kI-Lu}OVNg)vp){D8=7HoN<14pMzv4axX zpx`xvJL{T_yXzP%v6+wkWG-6y>9SG>{x_et#KHUFc5<7cq(NEY_XMM5BlCfURD5 zY40V@E4Pmt|DM3%g^B2%I~E@O9mN*A*usKx6xO6ju^+jcg)>U~>n4^&V(niU@SL)V zKFZky$$fR$5j>e0@ZQ7n6H&Ov-4E|*#b9mp5y2kY6k+teGuWkmm9|t4!}&MW7#LB2 z8Hl=;)awQM2P%BjPowDp}s1~Npyo-W!aVXwik9h^B&?LPB z&2GJgiFfTWSTPzMwaU1w8|OpmbQLx$a2zYo%s|y;8JJ(H4sQDvigXEeh*{qn80G6Btj~B&70#c= zoy(GN)-enI*|lW<=`2L|GDYIL?B|cEdLu0=vIJi}fM4WvG-(DYpJ%V2Fi@eP(UNVmz zju0dAE1qyo70P6W?+Lu=UVv+kRIvju=5hM`4*!1IdUB&M4z!Zz)8(qUpcZVzhw_Tp zz0P2=sw$9N@s%f5dt~7LiwFohIE^GOsKb#@Qb9G`8mDRufv)OF;bpzpIyE0YJ0SWG zM7(?i{?`AXvFZY*lucw?`%B@Uj0VSA4m!7Omba zhvt|fYEvvqW(KHoQE}UZCnb0BY&cW0f8}#js|bR~$F;)Q9l9joq7@q~+X(Xcr%_?a z8nPRI3v^e864R)~WJi@fi63btu&Ox-8ykI4xX6s%Iq3!Kzg4p%g~s3`aqXumq_}1R=sVZpv%{C@xvxAIper71?fg)tF$8a%Sq>+> zU7&S_FMA#~2!aWZ*{q5~MCtl_^q4-Itcxy(7Sl;&SBEWpGEpFBxlts4`d)CD^BcbH zy2XChpNCJKp=dXm*PMIhS+=nYl$Ax3>ZU|i`PW34GhsGqyC@JQrtfDVo&8YyauiF_ zzYT?^GgxZYbzHuzg!{?o;0VtX@i}adr)6STT2Cbjt6L7M`3(OLEfFHxX36#kWkT`S zc=F?u9?>pZifi796T^F2tm2sj>G_!f;R6$xcluH`Sw@H%Ue7_IFam!HH=FZsAVS_4zKBuzU?&*S{KK?pLFNx-VI|+L3zvS;#ZnMxeN32-t6a zBn*t2&qk~tK$*R6L~7i6;`<5lS@JpD@U$Ctn&^-V(@fdc59Rpsn+mt6AfIL|$j2+| z#A*P{xH1jK9n&NiPgJ1J%N|H{5|Wj|<>dLE%a9Br^bNsWF^DWjR&%WhCTkl%@Jzk5QyBk1fyNp58Z|2xIUxqzd zvVg&l-6Xn0mFO%8f$-WU7cF)4U)?rKmnG4yZ-b2g%tiCkJ`lVj zEim$8E1Yf`hSDEz`TV3F*}OjtT&7OO@}E`IN@qJ;&-bOIH%1e)m9}KZ$P>_ieHZ=y zqZ1$SUA8nSAv%i`KtkFvI&F)*&_MMcY`##=-GAJK`JU5>4F7F!Y1SnJ%`3Q1C7amG zfOM8Qu|Ys@ijlAB1-N0794)&1r(mU-L{&`Dx$}6$UP{Ph>F9bi! 
zRU|*jwQfpF62wNg!lGMq87vuX4fAN0Op7;R2718L&_!m0kQapKY|QqjBP)VD(MF`R7#P!K^C2 z+hPl{`HR@Ne;{N#B}w*%(mJ!46V#>j7U~%^pQw@k`U3G`y zboh1Qn3IMu`shR9?gU%5CG#`g^hS)yg^IIHKbOOsns~_RG~xnh^`k^ⅅiV;M%|Z zyOjSx1`J%!;HGtP@XR)ud-u2;lZbL(eHp3TG`yss)~zmIdPCb19i{owJY zYz*3!$+i24G3oVF*_?pUQ1<>foz`B8yEfGDeMcQ;zGN*7Sj=P|ej4y8z#HfeMea_} zO8U&8+3sx2Jj~Ll!3RqxbNx|_#!oY)!`}kluPm5xJB85zuzapV6zF<_^QDb)v2ImCktlI=|ESm#r`YZ^vj?O zN_aTn(bft0%jp&O77B1u^Fx8J!yewJzlHc&7cK=x;9Na_zE_kDD|)NALqAV*HolpV zaMX%*b-IFgi!RLiFrE3@o#HNy9N^YF4Z)+>Hf&P~hlw8)Nouk>vRYAajP2(Nu8Wf0 z3s++2ngjUd{SOYt7=ZnDKbm9DbBUd$h~vcu^jQCu#;&fzB~z8Cpje;n|7l7!@2N4f z_l9*R6(gbV=OWH%TOarNlQYg)aS`Gp@6tWxeymOW3jLU(j@LGwNBXOlKJh(9oy_;( zr_`lzKgbYk&82aTjX9mpIf8}p3z|6)fo_H#oZ-FKXcIII=ZYTz`-(EOS9C@jSV zXHs3#{cYG4brc6$LNPJtE7xVyh5j`^QTZ;As;6)0&3pqmm6(db6PL2be!J+dD|&9T#RVUcUS&fji7uc6t@Sp;}~wC@MYIIczW7`wKP=0 z>VpYfk)s)0+}%v!-z@yOu$Sj*dD3xaPtoQ5Vql&E!2Qd?Uo)HzT${makC;OEgI_TI zpFOLsyu+=%CdTBO^jPPP43L)ki1l6RTw^yu%lL4#E;)e5KJl!GeQ&q}HETG%-*#wT z77jO5LZH0+9ST-uK+_`yoFH2dQl^OC_xpH0>Kx`4n-0=qv7F70Lom@x z4lXyZWg=eBXyt4QQ~n+U!>J*7fA$|L*0hdY5!7=2N9yULUkvtZS%hr$nX zr@8QbcWHlF4_ACS5Vudhh*f{4L*|B7%(JKj+dn~=CjLSAQoNFS^rT_^TWvnGSV^0r zVleN89a+DGafS-5+|b7(G&Z~(4Yz0D*6tL6s@z8yu-1S#y&QfxB8Ix7Tp$_U;lQb< z!T|9od|~>VTVi{eD(tWnRH{g!-=Q<;9k2~=Z2iEQyEjvlx^w8Yn%A!M>@Z5#f)kE8 zh%0A|#zV(XVM?JG`>{9rl3|Pu_JtXN%qV5hH)-Q^oc~cJzi6SILK&|ol|rk|4!9~8 zi3-Nq_;9G3+M6xGDKB=Tx&1KKsZSD|9ejv0zNw+QcO9-V;Qi3tLHgDyjZ58VL;g!U z4HK?-bJNZYqUnkPe8+cmB0N`Mo`E6kuX~PVPTvG8=k38lr37qfNX4=~M;NL4jEZbL zk3Qw)sI@2?oHK23s#`j2JUc{HhxXUKDi{e?x(R5O7{ukOM!~r7kDO>*AuZ|Mz%x11 zvF5o9n_82DW7?jF~)u+y1&`O(qs#99P9w2 zmhEU!Z!i3_BABWEZWJs&I-0D0RffO2m1+1cHS|jxkJpl{iOyz6(DsakX(yzZ^pX_J zQJyULFE_yM_VZ5ct|z23w0dow`Nw+BtT$8)!1MOf#7^XQ)+09zjJK+p0LAu%iD zc*h@d+_Ldw(_XxvsKwRIIRni)I{3y#2Jb!NHLc7;@bcLWfzc#Mdi=a7o%A$~bC0|V zFEi3ngMZg_2R`ACji0ezx|uu0v&>48vSIJ_XlncW7dIw(2F*&9!_yK0P-LXXgf9Q+ zk+Auke03VOS-J8V00CK(c?3R{mcx;Q{#Z4u07^A3VYP=HRS_A76C|ur%ytZ9TZ|DT z=g6>ft9{(f7(YzBI1ZXJKH+1c*u4mXrdL)bZ)5O1pG*Ioe$#iZ8Ge<)=BLAFawkh 
zm%-QoE27qy4Yyy~5Q9CnFzg}9&W=xm(pQD>efoUz_hl}cD*l0d!zL=#Fo!rDKLFly z+`u}~h^&436t100sQa*&&(U8_=3KYj#u=XO+{o$tb33pBBce4pw*r55=ZrfF9Tc$L zPnr1CR(KxOV@5QDX(e_{?R!(zVMB+VAOTao6~=NacC zP87$4v67)wQar%1$WO^c^ok+TeKw0U{yfK}jN(C^kT_iENy(v&xnbste6(g#byW!7qPe)&tZFCIq>|F&_B9ttGb$)CAA zh-CX#9>AHyQ^~2{j2&V%xX9R;U0QdAJUOuf-Be$L3x9=)bV)GJG$3S6$4-(n$BBHO z*@^pfwTVk%4ZB`#Tqv8%{Hemqn7 z9LV#xS8~q|9U`VLjTU{g1{_^L=XCVtZR}$H!EB-8{cn>+Y&}&&vC8bKHkvhd1wbepwU`BYaRQA zdk>RXY@suQHX&7bYsk(kFTmgmEBuf>jcAoPv&r(8h}XHYLEAbU!zw@P- zuKg+Q;e#>MP$P&=-J{8to(RIT)!}e6{2)#)NMJ?rqI~{Ol-$y)1`alpC%;#*7u#QR z%QU5NTBtVb+iHxv^JAdzjz7y-7{&x5{62AcE2}#-j@k9S!h74+kcNVl?7K}e8JsPI zMu!aQxwi@Z(^XjCUw)@3jv(oeq5vhM;QCX27A|*#^II29C&tK=w2CWq4445CUf1tUvkdlXR!%ajHsFUJYqJ{4d>WraXrPA ze9w9}ES?d@y2be%>fQu8u+$5H%tz7QBP{ooBJ_KQlYvP?!0zov@umwPKHvw!|IE4l zOJd;IITO+ztcX93JO_)Ocyx=lBkq$E;aj6FQER&lIyuk4_GT{@Z0QiX%?V^`3qPal zi*~N`+9}-VJd)k#Juk_YGjz~OkHw{1l75Rx7`FDRt*+*DFnxcRIfR`6)zTxROS=Q? zs&3FUpBgF=qRTEmAL5EV=Hjcv)v!b+QMh}N1G#xYm5rCyCVuaeG3A07Sv7c=*S*G& zrl=YC#Ca4`v5aC3+x775k+-zlH-+BwUeD5-x8S>m<)p=dpXnYlC5J2fBKJ#>64u-}Wu@s->_;J-%uH)4M+BB%nM)Dl|_Mb@aN6K^N zdqnX2^_Q@$GzZGg7?52_?c6>-=kRY@7%TSUvjTfO;P$61@U&dPjAw{3+ljXHa`iEm zI`0jv7&0W;QEOoQx(;rrPZ3ruiWY42iz45j8*#SHmGs3A&7ZNoZ2SHCI#Iod|87YNuJ(~lf+T!y~BN#JH;z;?&? 
zv*3t9@Xs$qr|;5ihrw&SuuYe7i>HEYU?>Z($bz>y0(@k&gAN_|LBBuAq5(Q0#N63} z8vpFLRHgNj4)iJFp=C$FGceS4HP6VZ{(A>!=88hmViOo@?1T$j3|Z?Rikgak>}>cg z{Hi7tCH>SK%c0 zu{e>%F&C(s*w1wY?ZbF^Mst6Ma?)&t-Pv>3>Fjy&TyotX!{!Q%40cnElPhS6*D=%_FUt(e zb%~RG7LGr#4~!E>;huf94JpCWUQe1O8!fW05;J9#m&zum*RuC2RNHEJ@Q zzvc&eIZ8s;zgf`JnGNgujUlu*NnoeD6RQhfgKFuk|j)ZZ&Z7CSrQy93GiFISdOQi;4Lv?EQ7ec*G3?qU-$`{Zu{*;_|*ekGvA)KL0ww-3k-7eVG+Pv}mm#oNAJ zG=6Qn&}?~~@XYKMZa7p99iB{wu94OBZv0pJ`8`FC?nF3qWezqAUkjWZlDXFC<*2Q% z2l6U8bR@qcZWuTVtyPV7>Obo6#h7{c+h+$B{fR(%iEo^9bs!8w26(j&bMIepki(6^ zR39K>x!6xi+{f>i?*bmE$kB)#KgF4BF-tNl#bp%{oLfu{{g?8a^TRZ>&YBLwO$cL6 zXF{^5In0%)z)3-RTvlHU-qzd-!WMHZ$XSjR$n)vPib01*G-?z`3U-x;U_eueu%*(9 z+;B|+|M@BOcuW#@-8;s4?+jymOih_iQzPd;QjSGDzJtusj{RG0QbWhy{1Wn zXWKc@5ChyO@Y+D3i%i1>D>nxir>43@b(jz{JEDe7E-z zEj;==7VEV@S)EKgM7qI_vKb$%HA(dB_7c9e3KIdX!>X9$Q+pAY#L zitumGa_V<+1bK4ulAZDtAyx;P3&(DrN&{W$xSP;{x8Iz{tr~wIaI^%8C* zu_!Lzg@RU|A6PrfS%}D zdv0gsaTE@vQ;nZR7;`BDG##$OyzD8&ceAoke9UZAiD?ISy;JOI@E(Y_PDP0YU%7z) z+)+$sxb9=S7F+#63Q%)9B*cv%7W!?tyLu+~od-ym4ecl2oO%6Z_D^`b*@Ik6nGPuz z_2}sdw;+eiA<9|1QDtf+M7$Qo^j(GE`N@HMaD6{$mmeaJmhB*lYKy3uvL7Cf%H*Q! 
zXEOWOsc>G$0mmm#1)m#b$Zl7lLx>TsE6Iie(VMVpv;w<<@~BvQk$d*19hU!E4VD`; zp~K#SY`5G_Y%i}uV&i~I);|JX(uJ;6TlRO%SmHaZ%l@rVBEP~SQTLq`k#zhI99##W zc;5vyZ*POm&zuA*Fd46a8=4%EW_h=cQXN4CoY2X@7vDZ{em|U`zMkhR-`>i8UzP?g z_Zs$oNh8S}5!~-GH{u*w0!P=zlhYBwtV;0)PX8^#awi*-u`Xe(v3UtBUB8%B?b$(E zT$Zq|y^*BIW(2vkzZ{B{U%<&L_o>^7x#UW_EYslGYNz6q8S&wHQ{NVW$oH*q`{^c< z*EQRA(PT?fQ6t0?ZhEl6bu?Q#-h^1)0(?2`H*9(;i;oUjk)gF=Or!oN%DUMz?d*qe z&+;gn;XIBF)TXnf*&(Dfe-!IEGmAW~A4Brr7SR~9qaeN5oTL`mkyTR%aBF8FIr%!A zxf^{2Tgx!?nJvWr@k*>cC=qt6KZi9bmxPzZC&C;OB{Ao|Vxy}cdt}r4_Q8U>07vXG4j4{62^pJQpq~UDw65t&2Ayqw`3w9161%;=H zn5HQ4+9$wJeXWj3e=5g#Mrd!7|AS|n0yKOee@qF ziLPe~cTR)W&v|UYkB?-NOE8V$dGkfF>SSoj8{D@coaIfA#Vbq3F`5|%3R;qc+WZ0g zlplEW&>Jq@hv$YUydg_wPv(x*t;Tz=6ksUb0!F%3g0JfiNYFmQ<|=!#53PB0)cT{` z`dRat?{xl7ouQizutkYYKq0JaYylhtTFl<>?W0!VQkr58xqTB#*hCA6Yg3ZiB@Vc zOi8<*`=PgxFEGs}W3F4l3Ljf$VbMtcHJ9S+JJ#HXt&XfDM4Zj2*hB14jck6PNu(C3 z;gc!%=wz=j63Odt=}Faih)BciLj|mB-6RP3I)zZIqB6CC%Wxy4v}Y1tS`a}#s%nz_`i(>`Q<_!o3KgEZlfnYFmT`xkZ6)s{pJTC~ zEen%<2z$quL!r`G=BFA@3g2I*GqngrIlsbJJnL+`+fn%LVo&rZC4%<&AWYYehKo9K zxcK36WVvS{DnXMtH+!S4x-QvVFq2DH3j(Xe>!?68p(EV^M(hy4?2QBHvML4}Wy9D` z^|#be}5}zM43J zB5b)k0g6xEp9qO0=3LBlGFOA97Dp1ErGiRG z6bidjVN1+e+!2vVx@w|u`$Y%bQE?P+rB8q=`D|SLV;4#4d&^~f48he#L;PKgc`!bT zzejTZ9vV7fpTN?Z|3AykGhXu5(FBiy&U%>5e8-*V)#Sl?z5$wz{!~7e@?9qie=-Bj9kP`e1 zUVCWaw!u`0b6m{p(1&riejluzSC9Jxeqe%b8;EzlnIZRESygg~uE4SYjDg zemO{eLUiz0UJ+ed76o0q{?e9PV$|reA=Ixb15);Ze!N^ypBLRhiA9fKPWE~1klI5p z_15D2(xq(sTW|O(`&y9l_#6hFn}><5l$)>cc zvksomwYeb?m1_Ck^j~WlwY8c43G~6UQKNCjwiMi`kxW;sXw!nZb0D|1n{IbC#Yc|5 z_}DHKUY@i7&2U|o_+J%1k~)Fg+HO#BuJgSfuPuQ1A1w?l3CpY z7-A_yWFp$(S`TG`Gx*FUI|HXYt7+h^wF3RuhMbq$Oc-s==Oml%*_FP{#q$|6QT}Z{ zWHpuvUfZ06Gq&e(HP38Rnfo7xK3@jWI{Dlx_p>N^!5SY0d2;jT{Scf<&f(-mra>T~ z@Mxfqe^w<}WWyt<(iw%lMq}ug@0;i~)wS&TN+bN3t!(>PMjlk=ciKsbodZ>^B<_U0 z5dTc^Yf5HikH9x|-J`Ij3AIHFiX}HUV z&)0A5q9c@K$ivOW^!o87B)caX0xTOj(B}O;Uh}E@sDhVxPS;tFlWc>`d0N~p!OViM z<65O2=xlMsWp7L&aq4%x)g^&nHZ;@6pN+_kNsoB;xB~9vbEP{|d*~TDmvvm}#a}N+ 
z;e!_*!c8&PVB4=Z;NO-b9CKkAEt0aNHPgNeMxIn6p03{@X@MkrrgA7%!>rt*B}9q1b7L>GO{ zg6dtFFrA&n%|HGKH|);i^rt<<=g<8|n>4?{fNVFtaVAU`c3mSoD_4_gC`!z6`#bD$M2gqtSVY^}w zmH#^@9N(nL)I$rYiKsAK_425 z8o*@nFWCN!H{oB+z;UxgasCcj;lwV2le_w`)O?LloZpA!W^Y2@Gv>kx-lDkl{&Q+s zo=VS`j^P5!t%$!!B|apUtVdFpKc`0W_k!uLD^|+v_7wi_5Zwul-J{S*?|qgQav(zQbC zV@Ijm`UGxo{TA-%p}+9LARN9~&ZoPUtb?_4RUkll4Q`GU(5)lf@N(@P^!RCu{*9CQ zdyz+DMWF7>+$Z68DP<-!O0fXLBPobIIt@olAA-&F>DWP z*BODGVV;nGHkk|CG!oymMbn+@EXeX1lel}f*^qiw3`SoVqD}hW1-Voc*Y`96?s$*N zlO3pIT@emHFQ*>i6X}hQJZm>84p;x~g+pO2&>%ncQlQvp+dvsN=CA&XyA(OV<))4z zrejCI?5W0Nu51?7sab;XMqi2o{{8739H_o=>$btdRv0j;}t0r*J%x8(h+Ti$CLUztS3rja=(ym!S^lI-P zI6febc8hWZbHk5giHtPaP#jM0Y<0j_1CjVsYa*A+XY3CB=%-(8dth8?GpDQG0D)GO zoN}!+{AtjE307a>q-i|=)B`{{@yU&P?|Jpt4ImaR6XZDNkV3XGD-*u zAz8(j7SbR>LsKQ9WmV6)PemjvibBI^*i^FD@ALcj`R93FpV#O0oO56Ib-gdlbTOnu zn{MH--+DAexgFn+D1><$)dIizAN;DQa-T8Y7nMHN6WeXWd_1s4|p??>j|DH%mjJ>3Ik} zZpLO#`~mAC2Z?4nUx({5Zr~NB104iX))u zOs>EDfXLR{jt1#lab33@%pA87zu9G=nr|(%W+dRT&4XcW<}mg<(j1!V!}+lV$N3lU zJ@}w9i_U#}nEna81BqzG#u$f#rc@4Eso$a{4SjfWUlP0sJU|aijO4pT7wLFCBlJ4B zmzU=(qnMu)?v+%bI2;1%@Dkx;I=NZ!e%$CGOge$4fM z+=8Y9R{T#&0S_KOna@*rgb8^H^tXCET_)*5Z@I1G-&E&$>#>I zpdX$^(F6TTRJ8gLxzb~b(=xYW^SoFHvRDsQHBsz|$zncy>~U;u4#c>|3ovB2CBNYq z%XgiMq(hywa4uF=0mq3_uySVLt!kxo^6Sdg}&Gp zQHF*e)wzD3N37fr zM@Oae{QqvEb5bo>r<_B%G6VkIzKZGX9}27T%=ovA8FbbClaN>Q6!5D&e>g9J?|l&p zCo96JUB5q7pYH%i=QqNy7u#X_T0N9EdaexMn98lS=eA3|xa^aUptzIJ!u>~i&wuIky#a%7 zJFk(#ueZsXhQYk{gA_Y9>HxLSF`&)i2_QA-2n5V)!Lo=l(Y2g52$-Mp5IIn{s+gO!2Ac#pvZF+*Lc#8>5*?) 
zc?f~1uvvVL%w&4bMet2$8`3PfolxAk6&&8=L+!~}n!E291k8y-jnAi<>5}<4!m@_E z4H!yOCNzP`EpzS!&!FX*5p4-gLh-%NIB0SiWJ+g1VcANO8TSfjNx1R&xkbD?$&%F_ z+d|%n+`+Kej;{)l;tD(R(QCtgE_+>xxklWA45b*RTdaXN_X9kNEr+I)DRkK8S&;9v z9sg@R$qKb)aG+11sAJUzDtXQux8BmkRTGUsdC_^03HAdr^*T=YbAY&gD1d5Iz*+rO z@MDA&?-0E5bswIA(*Qld*khPg6v#g``S1}prm}<`IsBW&G?+F~268W)W=F#Ik$o&F0v=a(^GQQ4 zkw;Ah7?o!Ud3`s)ymmIvnK%)W_aCEnQvKpr4OQgTE`dAUHGn^=&4Ka1RA`$`E8ZWK zf~#_(@WHr43=?FaU1~P!=P2NF-$0rxbX+X1S3n87gSaw!6#sp2Ka6(PWC4#x;+Eva zd|=Bw{GM0@5f>|XYjG@`e|~^}SVs65>Pt3XxQ-T6giKNWBBoW+DKcEwMCuoF1EtPEn8q(b2K*A$l)ikDfM5LD4t<1PwL|+#!tAV$ zYTnz4zE3#RjmSamp$ni}d7aqT=OnspF+sOPbAD4}JRAzH0FUo-yr`R@N{Awt8owB7 z3YysSae0_K)D{j(Yyf{D_mP`M`P+kkpzg9M@75YgX7?xKtls<>W!=Xm3X5jfk$2Lphas~|G2f#TIU1{78bL#H_7~<x_JJ%L zos7R%F2xtpH}J_MUyO8L!)zSNV9?w?Hu7~Am`&LPri-+2}8(>^$0Uy3*%2YTC8xx<(dNHX6+9gDES=nLf?pTYB$;k$MQFa}&ZWrSJvQX$2-Aj&cl;bx4jpI&9f?m457Ns|P z3oL~p++JUmOJ@~1rW!e6TH0ycSZ)Hxx3}Z($tSV?$1yzQD8v1;7eTyJEDRgW!Q{Ur zuvO_`N!6!WmqP$tO8UnRE$D@i8?tDlKNWnhWMNCBE1vLE<}W5>KyQr>*{*VdX_*va zNRJF}%dLZIzqNeMiwJahCC^<)?!_RN-J+ex@-S6*03VXQ0y7h5;#o^I-1O0zSgJ_@ zILiS~swF?uH(>OJ24*ejS(6LR>5lPAJN zT*xSnpn)FaV7sXzwb(zAy%F?6r$Tf7Z+9bN?kl|NzJU$5K21&!eZUS(j^yXw4iqw( z9+uLgLfJ9&3on6(R^6na;TPK+yO+#)x`39~XbOF)H8?`ooO{2Xj!Kt} z>2cdarqN~rW1k8hWyJb zF=D_)Sa~9oS2yU0hP*v4_FX=ftpBP=C+)fe;{uoAkIW3{U!DnzhP&e5f_f;Kwgam# zi?DX{TYOuy2m@n%q1z=31`3R<`0K53aLO~3z7hg6txtkPTL|ucrpo3eeG45~iai#>n^sO7yHM&{7f(Yvr!^M^~o$c+siD$n=2z^Fzu<7l65_PB< zE{dy!ow3mG?wE+hr~6pik_0q86(Ih1)tk&eqzz&A>rg>9k-U6$RrGSbAF{S)=!}^U z`KNT)6k`KoS}SCAp6(~N^`7F`LKTpF{1FS!{>0;BYH(PTGM%TSfX*gqFk-eCzL)O9 z5Wg-_SJMw1|nRl#W;$o6eXv%s%d}32?88EjM2iTr{w}^j{DyqJ? 
zz?Qh$(^aSkJbNJYIpvV3qXW6;kqVT%R!91}l<>pKr)*DKA6Yo-8gBP1CW+qySY7Tt z*z{eEh@P3!V{tK%9j49?991OQ?zLohYOQ!O*~^rTOTp{EQy68S${p2L;@12%SY0?A z%`UiMPyRSC6ukfKhQFcyX$};g)ujsso@JU{0j#nb%ctI*!k#DApthzg%@p3Zz-6mxATVWOPl72H&}re5$-Yyj!pg7G`FVTQB!uY+)kpI@?D^joD4cj~>s}H>Bgm zCOPC3!Kg?i%`Ah3jNT08;QX7Nlo@ftDn=k?BpRl=-pgcY;hZ!m9)6w>7hLJ zU@WAn?B_+=YTRLI1|;u%D|T39Mc25!!6aP+>bxk3mV8pDS5zH&K$INUc3H|3Y$~b7 z^mBCRI5DJ7jfP((8g$o;6pR-<#5pHEL22J5Q0WMvOXFAauL|j~Nj`^+Zk)o`Z1Uvk z$>CU~-c7}u*YNBZIqsqQo4ULU27}sg{v+Cv?jDuEsq+y!(?^N3DJguEhdQ-2n#yZ- z%;v$`o4HGq4xMJV4z3+7fp70Z>Czv|c%1i3@Re0|!bu6#-7>@A*0tL@V8h{%PHR9Z_vO}vHyJq_?j*9E2)T;P|ho`A-K z?bvd0AkWvDLj#QU`BL#0?3*~1C)BMcQz^r3TO8o2c^~L!mGB#t=@9O4j+Xa&Q;#Y4 zA=6EQJGu;JDEfjLPG5(}@(?l|<`TP*t9x-xOMaZZN*@~PYtNFEcJ77;rEXL)< zL-Sq-ys4GN^cG%(0h8nSOD#fYg(=aT(GJjOseo5sB=TYBI>FCd3{LL~!1cTuKFfY7 z8mXy-ukV(iXU2G%(CG|U9e-fq?N{WohZ0^_-arjMeIXXV2avjnGhwn`E9n0+;p6Od z(bn0AkGGP;TNwkW#nN&#T4=@}t$0cdPEMyjS2B1-<9>QDX*8WZvq?Nf*%Ov8qx_lA zDWV!Nn6?QTtBX}U7Rk4eA;RqVa_d*7ukS1VGIA3Lvkoup?}DTDD&+mXOWMlv>Hk~c!AGKEkg)f-7s)3r41nFn<3tCRon#rUJNW0m zYp5rWj_!lz^VqKSB(HHbBMM|*h!iS=rFDvk8TQ2w<(xUkp zedO(MY4+jqd2~LJ2#F8IFlo6iXmcuJ_NN{ABYFwW1R0_0>L)Nev;hqC|0AF7--9<@ zy8QXu$DleiROp+1g)b_{2pYqFR{XpM*9b@p=^YZ>?db^8ZXU;nj=cnT!e!CNNCT6- z|2cY86!43a2UDE|>a?n(6mnW^f$9H-yMoted6f}(IwgRqQ3%%b%X7JNmZ-k6mDE~p zgEv+=B5Q8}xp`CY`*r`skDKpe>#uHBtQWPPA&*7Qu`8fF08E}*_ z!`*A=;m*`6%;2&j^8L%O>%0X~ZTiicJ$$HT)k*wW6+t5JFNEFMP2&83RFqZRi!0s^ z7cZNciW{R7Sa+Zw3?!{`@Wo>{7&~hVG#=N*V69HJPv8uRN-V&ClqR=O?h>7fSq34I zO(ZvcCN3TB1!r%zp^RTEwtVQpuOp>l#Mf|g{gE*=v^zkFS~Tj&SBUJ~Ut_mV85Ul= z4Bq=>#96B{kt(Q?q`{-`NB$WceQdDE@m4;pdrn@9o3Co26i9vkBHFUSYQP;)&Ak*$}ulkZD`? 
z;rG#s&>n1w$5+*n9PcV9)E!EWXG;kEm_JF`(kEz8J92$@L*o<8>{ zn^Zm$ljWMokkoS+e)<=wndpksZH~dp%+c`j@@0s1yb8Nj_G6>c8hG9x#zuNY!+T#@ z*7f%*Dqj8o2UiIFyz37$+Zj5bJaR6E9#!BC=IU@`+X(8feM5XCv9) zgLvxhx$wbM=soov!oO~kCI@~bq3!KfA~D8@EZi}j{92eRYPusZ!6KvJbX$gSW+aoR z{Ru3uUmL!;{bj4%P3h?)+n{iX4C_)?VhWLx#79q^evR%CUtvnnwqzPQO|D|TPCwYp zBujW_@EEpksX^!6??gufC-E%73)r$|20q>Xn8|-LgZ1`5ahBIDBnqaY*kv3KtQNfB z%M|Iq(j53Y{yFX%y91p4){AW_%h|l+FGWFrw$Xim%|O>p1I>0{z(MAZ#a{8BAy`;_ zEQL=(eyWt6j1MC%mc=k){7~qseF3kI?x`^UAx$nGD&X1eI#hG(Vc2j?@JC@RTaw5@ zZ3p2ET^_JZRh>=HsKZ`UU0UmTkUi}5MEJFVPJdJ^^v55DwA@j2=_?P>sIUg{e+?n% z>J=n1emRzes!S8rg-P+UtujF7*E7$$8Km*oUD$sAI{w^Qj?*64^Wsq-$x}l|9`yM? z_D0~|?p|IFmlxWBZY9O}iz49X+Tl27!Xxt4H5|=`#DeOgUAS$P3f!A=10=l1uxzCU zFg@lAe$-c@{(+%D)x$MLbyKdE(VAx0lFqlVY!VBq!#pf{ygbTr)z!#f7D z_y5(Bgl*DP`f&ujoc|8;4~#^4&1)!y)-?M3OwPpupXqR>X!%CMdqybm>V3oT&z#Bu(ai6%XyooLnBq}} z&hEA1{BcomQtmK5@weeydW<2@wGo;IJtsxkrZlfZ*tL^CY{$MLSpPwq4;?g|n24M4 z^Y?!2{St~@6BmI0ct5W5-4a98He+JiaV#B{z-n?w!3#rcSg=W!8vOq=Sszfb7(`n1 zy;?`7mS;1RUYxcHZXI9FuI%W8koN_kU3MBP znvyHbF3Iqd6Fn?AQj=E^6T7(-LqtY!H1jYAkVfGy{#=^=O?xl-L_AV#IPXy%O$6R1e=}PBO8$ zKAOy~n_ai4@Y8*Kp# z3z@mFGx5qC1nY`#qvQGr`uK4tULLm{%%47ly({JEZxuDj^RVP!m**1c+5sF^D$=t% zX9`TH%TQeKiec7h9#Lw}>+Z?I{=0=0iv+=N<6bHDePcTr7?>oUSW$pijPDBmvCraf z(tAn%Mh))hStUS$zOp@*m!M$mIX?QCBEKrB&Lf-)XAx3I^YL3)aYLscP; zUYS|JObsTXNv{@kPkD_$1BLHOyG|!J+=rb8XK3>jJ^Y(j4|`vHsrXYrnNQC3=SA86 zsC>GOdhK-LUoR%`*7jEv9WH=e=}mN(2!cTSMsC?~juzjl<;Cp__`y*oboo9N?s~qM zuhBTpwLDbmNncHBz0{eu9Untw-J^MU!Vgf+&~Q@Be@10B(?F*hxJXUNsJ=21|NYxd zvgdn(mlxqJVM)Bap_`qmJV5Q-ZlXf55;wTJpX$yPW8Y<6EL0bfo?QocbXXG9l?US+ zqftE7vl~9jq;hQsCmttsQ5w9u2lE#Q$-@0FT9GUlqO?jcO-Pl#N(4U%3Nwt3(9xMkv~(%@a8+oBr!djzm`A5Keom|u=Y8= zV$&=-yY&S)U+}?Qsu`&1HIrZ2`4hfsZz6K#MnqziwzzM=Dk!`;itO-yz?RqtIWGK@ ziGzfGj_}8Si2VW)e^hxEWI}f1miK0S(#-;dbK5{qqDws8aUN|~JctUer(o%yUbG8b z1QHjA@u-Kbq~6RHZx0FKV=}K253f)#^=>3i8`H7gN$6do%qw9(Y39+gQ}H8pw}S?%`KDi*TZ*7~cF6s;HV4!1Sr- z;p4E!VBa1C1^@0t@ye(6y;+KMSw}jo)tH6vmNl?RnXMurDoU1@zk!J+P7tQ{nUCm6 
zXJt=?zL80zxk+joPIDhE+|B3FHwV_S1$|C1Nli=q%RZ2={k;|L-D|%pqfxt2P zSAtZsgzdhS&MOrpcwS8>6lKhS+Ttg~rCyR=y!)GxtYlDLF9EA=o*}VjyM%6;Vf1sv zeB5$F$eOk&lgo3$xc>ZL*tEHlc>fv(5^wWxQRs6l&(@)Z4i@~1XBgO6bi*Fu{U~gz zh7j$?;wu|>u>ogPxz2*=WODcmay2oRv+`?9KC=O~&k4ul|Gu+{FaR$}xq;fYUtnyV z0|(bF#o>mrptxoV_00^&#C0xoO?x#A{$b1AA0k;@qQ;}nyU-^$=Znr6d?e$uMhu>F$&wt-=hr@Y%dT=#di!{d*0|=N`$zZHSB<^fe zr8&*YtTyKYTr6EczYQ|wD-2@LFQErU*iGUOqf#KK)q^CVHyUqg64fs^fYN^xLGv>3 zL+iJb%-Gr7H02gry`KhWTk=WR6=Ve!NE0(Hwp~eDJ z{3N9bGxrY{-7md}>*T|*r~DS4kXnjKSI2L6A7N%+ zB=d8W#Bb2F|rn)T~Uf*Lhq+Xh#D(zQl%O*e&g$F z*NJuEd)D!73vPez0=rH3LdU%Q(6c~b(k<^Lcbu~D#F9_s`;reZ*I$c{-Z>H#JPbwiZQ zWUw8o2h~RV@!MKUOmyxc@kyFo&EY7tDR+RsOB#{ZQ|7k>cC(5`zxZ~^dYo(>NhGpN zciv6A;7=#>bj(NOD014mXz|R~#zFx;LpF9}aQ^?*X>}6x_L~yx~fiz={Gpv(y#WY_d%o}h{ zys}*IM-Fm@`Kmqk7Dg3#WMcx6vyuSmmWRaAOM|+1NQ2Y(9x`k~9=yy9W`52qu=uPY zhQu1Ojfb9M#Lp;@N`8lyZxXi~C23gTbJfVjaCySI&}dT#CTRevar zYD!#>wiI_vg|l~E6h1ldh@_)M2iF8BQRQz*@OJ^2I!k=!$Jb%AaUhqw!%t>wJtgi zUwoG0L<>hECFKq;4+pUyD}Ukd9L^&99Pq2vJ;)m{9J-@J*_j`muzKhTmatcw{5cVc z8@dmncgub3&YK6h?wMrLu!nFjMh14=b;j#vGuZUaOT{BK&gH|aDv*wF$dsrjH%89|nxjTcMyj>6L`Dsk}C zWwd6?di?sO8vSHt=zzJqQSlRF^Q_iGxq&RUn+D*O=?CyXbmP=d5ggK{i{p~qxN6-V zmZ^4@O_>mfA52~N+{jc2$}WdK6K^ro8zpId;vpNCVvRtl$>PZo`VL(Dwx7|wG^=ULyU!<3R$ zc;kU2b*sri<;)^}b-6K5JQ4#%_ZyjbwG4k6-4E(dzeBV0NUmyc!jF6yO;cn{`G9o> zd|O~5`EPOs$<8*7UFXulN`HojEJ@? 
z(<1|4F>@i?wdm?$YP7`_=SQZ~9|LB=9xr)rf4dD!PMv_~x5~j*R3$Kbqd$Kt)hM#q@3WV zD&J*1iHm4Ic$s$NcY|&8!I*Gr9lT9AkFQgSu4k}&bq(F2s{`+!P2l%xSMv$^3Gj4P z0riX<%9}-bd{O!fI{f1wahj|G=WDHDR(=NGI_(>csdeQ?C#P~}VLtl$eHiu@$H-3Ks3 z?J#WJ9m>1DkLMkdhfz@~i>^7G$EVD4<0H2g(|V_N{Mxq+Jbeh7yGrr5uB++n0n_=7 z@v(Se{WsWXL3!Ty1pZRPgilN?V9t*d@W;WM5VU6mEr^QX3s=;E{kakvl|7n9{!D;3 zs|rxzYdCxtJlW%)pJqina!}>BEw6n(oVSg-fC?L&F?i{0Zt*>xx7{$HF2kDOQjZtv z`B?K4+F#K3@GKZxGL|p2PlC)VZ(wCxG>=?RjouF0yiDR5d8g%x_V+?jZ;2V~Zkx|H zScb#;v%}f%LA&w${+)b^#Z4R-p-A`NvW6YCnYe157yeo-O{)%kW`&WBgubw(p2HI0 zVp}GUUiKFpdZV~?%_vwiHJRTsn8U5-X=3iUh2rbAC&ARW1iA}Mx$1^wxSleVA1?!< zRN%{X->2an;e9{!-~iuboh?c~C1k$e*yFvLG`9G422T#WLHai7!r`?ih?!_ITX?~a zJ~v3fwo6Xvymm8scV2~PxBdJZyd%yB_Ve{aePP{KTmE6M6o0nM2rrKf#9zCwv+3r0 zA+5rf|JFS&_WLKv=Gk;WSmJ7aPi;FpFyRgHDhc2R6$M{-h$KxNdqI@FTbmp8yP=%g zb5QywP3Mg;N1yg2wCFhq>x}9kJ2w&=n;oF#Z9Uk_zvMARB_z6T6ZT%T_M&kvY7ooi09{Ov*G8> zMVE>f;gZ+K;hsRjk?~W-aaLlO5#lKpZ9EO_sfyI=z(bN(5dYfAiF0*4@#jVb>XSuyjiW4< z9}%IO?^oE}ly$dCR! z*#6tIVnY5GnA~$ptz_n>yX?HI zviRhj0+@DTG+lbvk*|@Ghm3)8^nu!Q(Vw-mVU|S-*lCAhw%iFUJ2nQd&$7kgPwFr> zajBrC4dv%QPeFC1EO3!1gm04Z6_0}l(RBejJjgQ-RdtM@Cp;bruY}PTqd_46aX@Gv z>{@;m1}0v|_iifmM$<9Uz{*&`BTpJwzLzJdjP=J#@bIW z*1PztBN*kb7LpS}mPKTlM~cq2!^6-pDDp;}Z2E+Jx4#PN=bd1QWFuyM?gjbIzeLyk z8anMsXY@fKDQ(pwFW2PY5yyNS_^g<%dmkzEEG&Qx{b^zciF8~ojwLU&#q5Ih3G&`Y z2a{$rlYipTeC*F0%%96xzWiSV);O6n@;F zLd1pwn{-eoG)?zPhr5VbQ zI`|5%wp)!I+Ud-y-bn0a6DMR(rg7{1-z=-h2aeC4h+)mu%+KNgmcF`=pqhzQ^5&q` zrvv-8odMa|J#1H_B3!%Z#D0Y&!1tyx+@|;v%MD(Kx2CUTBVW~FbV!ZE&B=9Quk=(b z%3eUKOEYll1VF8Xe~@w^4|TeY$g#A+f+u@3%=;0`oSVO+dxkzwdp8q@FWqNaC+5Jp$p!3zVWtQ#1z>&0BC>Rt4mPal zVt4c(kgd;O3Uj48e%LU82igz7zg63q7kt&Td%8mwjo6?=j|d+g&6R6Y~7EaBShEWp(V4eF=`dV!n%&znXtu9@dvTKgGA<2%s3fzo2i$`G7 zOb=GKU^Ui^Fh;GT&!J|>ELbx%3EJFS*r>&hc+gRoD7C!9D&JDFApI*Yf0+QO&JMKM zPw3!iDZr5U5x{?AJE zzV8YW($IpL^`%%{ok2wN{1D>QiM4zrx}H4)^3FofPTqK2D=-bd8rZVPvw*h4jJf%! 
zgD_aG60~+4z@H{>ShUwLZaw;7g@w-vvU$^B>Mc73tIA#q)t| z%-enNsOC7lZBJy9f|qRBteg0Ku`cAD2t)CjnIN~c5w;&bjG_Nb>B(RO?Q5m*Y1VSo zYqAmd+b!S*orj<&ITPdKmV=3`2mO{+geCD>;5ug{f79}x;Neq3mu22$w%J+CDi}$A z$gTwKUtJKHSByP{g)n4x2o@Pj!x!}zxG>O=&iiT&w_2jWnna_Gbf!qNdoRY#QREZf zTa}&PAnZp@5VSTf$9c;xk>0p|oKruXzOxG?cj7a#b80(NyjqU6j}DOxV@p1Ia~_&4 zctkwWT0GfcHG8n7lmusN0p*$BNYb8k{Mh#vBrNCiV7=>(<6kVsi+Lt+aGZ{?_r3v2 z^3hm8=HRgSem3H^GPgQzh}t22;Cj2CT{lyQ%N|p~YHTgeN_|Dzmn_4u;vg)Q*AVYg zo{Ib8Wne?jAzalk7d~E@BY3ccPTHg^)-3Vj7PC?F_@u0VQ224#TAVa3F#;k*`d`eFO^u+1G4~+v%9PB{`2%12B zwlNM%vBJJ^KL{vKz$+(ykei9$$Tr6@u;79%al0W2FM?k)+4HMW%6J^^s_2Kn06%O! z_)+ooKXm4nJM{g>)<#NB5LJ!MWSsqVu~vSblpTpEli` zpA;R1Q+G94xSFNF-~aSM`LRc7vq>yJ zCSyfMgbfAxK0Q9sq79#GZ^NWxNti^X=(j`pc(~$>K$J^EpHb?3xJ?DEJ$8>~2^kMH zuYOeD5K3!8s#%4=3A5X}4Bveem@A7LsrULG^v^G%!Di`X)%43~_eUi5Oc?=~@rl}x zwBVJW4)YrI$<*RS4xZV5AD?|7utr~v7v3qPRhvDz(ghQNfsjte&7Z_-oWuE2=C}V+FD&kUu=H`*6sr9>6;>*bvT?&()5Neo+HsB zLy4!QEXFNs%J9T{9U$3hJf$p@TaIaf{Ms}S5A0xFMjY#RSnzLcd7#lPEGE9^V)>3< z(CL-pkt0{~zfuFxqB;%__o$(}&@VjLWE#!Pap%$Ir{R#*UnnxzOz#RFh9{`W^Cup5 z{9PJIM$9cmvnzJw+hlqEH7E`bo>~kI3U18B+aKQvEUjHr6zS*ZBQZg-5lwcD6xhC- zc(K4(Hm)>9Yt57R@w5a{pRLNB*KdYf#07rZTy~UqJBc-;^!VAK8my#7mLENn$tsp> zQm^dq(DHOM*Nal-b%*8Y+jF+$#p(UHPo%=Slcd33SDC;5dIUdKK7~UXGvL?~f6|ex zO5g3+$_r0y7uQAV@qb@8a>=#FvG_k{79csTBGA49G@eNC)B9RTUP%j%&K*oEBZbUg zp@11d3;rfo&?-Ls!X4%|bFA2dA05#0O9gH_ zCSlp?IV^A8MS9|8E(~+s1Y7Ps$H$(B;brV%eAuZ%=eN0X^=T#O6=;J0gxWs4}+~;87#Rea9%xBg6qp4`XqWbF_zAvC7GhBxJ9J0ej9v<0dgn z&CUjKL_U1{u?3GEIs)gI7A#jXWS47pK%MSuoZ}D;pB}Y?@3-;LcX0?F4c3F7M4x_i z)uhV#5}b$6hwYJsJ{kFz9Pl*5&#_6+o>7QHzH0Fx_c(lhe-U1hP~nMHE#%h$55Cc6 z7`b#@liui&q0Zu`#Ch{m668{hn>?@Llm~}c?q-7b6+)TJDJg2TM@7(OS|CyHF9{g) z9&P<9A!&Or6X;^3VII)B+h;^M$5mnf!vavVKZ{ah;xK2_NH`E|0OXG`Uo>crkW-tA zkN8#Ck`W0bccr6sjw;<<7KbGU2jHHf6OPN1Ab-b)Vz|Z*tY9m_PBsLMhe?p%9)Z^??vHaS0Y?-IZ`yMZ5I}Z!2 zPxF@i@oAV$(`UpzT9E!o!w;||+4xrj+oRsV9IO%f=xoLaHp3XGF zP=yHM_Sps|RqRIo{>Wl^SIY2jbS&h2@?|<&Ddgc(Wc%G2c_T9sJ9DSQ7&{T{bc+KYS#_Q( 
zmk37{9buzqmN;ZagfQ0*=cm8)7OlR2b^9nIs+jjWS8Maxk9XjzSFx~A;SVmhOoCn0l;D1541AM5K7mb+iytDWW22NqcLMjLhs=5mG9o zDdRcUX%Hf#R8qZ4L(!tNhxh)|U(d($xu0{NbFSa-`&HthlT_H?s2JhyUl+PU0!3qP zw?XporI`7opHzftgZB^_>ma`&@O~}A5h2^V{fit;yi&lbzQ(g+hi-KEGaAo3STN5k zau}2rBi8;g4xUQNqj`J1kef||k)Dgmq6iPRdi6cQQ?>~z6wblAYR9rui|;Yjm@63b z!vxASM`6sn!|Zgf3qDuA0XN-r`1JHeu;=1+Jl?QIa39FRiIhY<)>w`e#)nZ>rbd(* zehKjT9DcF1oqXOQhit{p6*)=d`RT2sKAxB$OB0Q6ZXv=$y#H~iGN5j7z&xl z8x}9nz;d*qI*GyU$;59*6=NFC zFh6~yaPE&Q%!4jZi}e_aLE+j3#@etk$CV~8Fyzo5|7gV&Wfh$D;AaF5PNd}r=P zCe2kOYKyW&hd=kBUS};)9yANC&5MM%DbkqMnndR0$MW{RY{+d@S zwZ8V^v*16>p>G8o@gKqi&!=Kq#T6{+S4GS6c(7PE0;T7?C!K6K6!v^%OJX?GT5jXx z_iLidjX1G{&D_<;m`Qhf#FsIf^9mFg7BAq&yJ3LD?=i#!iWtiy{TCqXUjz zTL7h|e%NwlFIW6g2_A_B82)>Y$kE*a6}Oq=?jO;-bcXP(b;-g(nQBsTssO)qdk8+1 z;g}%YmCU@{&ax)i!uqCn?9dehHdbm6u8NW6vT|bdYFFX00zdUYWf@3rJ_bi7IH7i# z7ig;hnfmev8F(fL^`6y0IA)2e#z%9f_GN78D;v1=r_u7sINtEIRs5g~ z#POj8;_JemXM9N!cNBWrCzM99x=R7D=Ivze^J56@`H}{Gdp?k;Cl*}u#Wt|9{z*DT z19_SLNQ$d`NlAV({Bc-??Gm?0gIlS1v|llHS-k~`HTUtKdK7mHTtcMg?1Jh0GGJqq zDfZ5ojX94l^3cy>QMh*;`Iy}duSX|A=lJ`4@Yw=hnz_N+F}|5Kyn2S6YZb^Nsg+dW zeh3E6el9Aq*-eT^)Q}bK&jc2*EPX04VLnl7^jbawEkCxPuWJmexp)^3{?ex_ZL>fH z&thQ1HCUUroOU}ufoG#+@a$tP;R(NjQx;i*-MR%rhVKIWt&$}NZ-?`F6*KwsZQqzd zmpcq~OW^9&0ajxdy3rcRar}hZKbY-QO=3z`Vz1{$s8j4CQOl?Dp7JXATy(WUY0!Co z<9#nlwlYJ-kPIr8KR{o~RpBn#Rb0=@g|_@BMcZ7)&}oIu3W%PF4my*B|16laE8*HXZ&-Ii2oUR!c)q3I2n#8IX=2ba%mdxTsk97GO?|gxmF67%3E$D7ig0FWg^Ph8C*vtX3XcDeZYXT&A zQZC@mB`byeS}~?ijt2w5TOhy2gv`8_3fA2Uyv){~8)~G$4TD_kuj3BG%IWDiTzID~ zJ~D!Dh}w#cLdLCX=K_(7#5ibF&LC5FuHsQ+vhZ=sK5pBy3oYWDFl(kWxUJOZ=AZM? zE#?{8J~#zG|8qrS*)#Au@EmH~)qq!vH}fHvMsxpjP586Nh<5b*k%NwEcsWUz|Gjhy zPA!%t^7kcZa>@l%o_8KL&#wXJ8NPVMtBC~hjW8$hDIuA4;y>3@d4c?G98Y7}l?iv@ zRdX=DDLI2mbA)qlkuD7}Za|q>3!Eq9_k1!0H)l#0EHsp-jkYE{B5xihI=;h%4a=Zz zshEx3xkn^z(+!WJ4~x!6OoYG2!d>L;Xk2%xo=ljtg^uwZhv5=YWOGy@O17o(=jZ-l z>hFErcX^h0y6rwuN8>K~_W48NrC5#zE0pP`lJoSfwhvwQ`YB0-{X%ISWMR;?gm^|va080Z? 
zL19IwaObv1tk^f6hkuhpsxy}JEjL)L?{xCzL>=4k_63<=dJINZC(^*5W;D$yvf^}0 z1$#5W779v7P@^J6G)Njqr&?Xbu9FAI4}l4{QMnXHN(gI0^bzK#lg0K-k*1rojuEAt z9q^?$7q5=(24%Z)P&k!|Qv%zO)mclBxdrV)YzDw-;n?!s+<`q9j1Ef4!xij5E7q4vqu zxIldo|61}LSVRq&coAFCv)1--y#oPi<)jp7Y>j9w?^NCmeFq*#EpIz%U z;xcDs_>P8j40sknoMy|Bx&BMJiTWW3Jt9F}BD27=sFIvXkHy-DZ&4w$fxKA%mw8n5 zv1r)@+-#N#t2gQM*_Sj(|CI))%5y}oZHq~{=L9sm1|(-;Eh)5(tVCUP7T{EqiFDsfby#s}j_8QoI;^EB zFm<{S#;_gi1x_Ya`zlF12yEI3!|9x3H}TVHRS3Fz1iPI^b6JO4xTc>*j?c0NZRVuPVjXFG-^R=mYGGFJ3vsVrD0;WL!V_D2QnBqf><(1KMZ(^sb@U$+ zcKRy2Xj=z&PE?R8q0?&dU_NgbWY&*FHj-=flYBXPE495Y-q8Du`!k$V># z#P8b-snP`_sQj=T_Uw-q_k2;n9O1tF;`-Ha@QoTDxpyns@{U5XQ#*09|A+C*bBu62y0{tyIbByuHCW6F-ti+u{0H@?+FuqRjNY8{shQA z`53F?^3b5&fZOyZL-U~Rn0H*8eIGFik3`OAzJEWG=6{SW>s`rzPrC+})d%6(M=OZT zDoh<;F43r3O&->=NueA!0w(CBeec z21;LK!0EIW;rSj+X6^J7opZ9_2R&99IO)$j7A1LwS=`?P-_I<@4W7pZ?xPWRC25$rFch*UGYrif#TLh8;4PO~ zxPEgB$p4*#IeU%pn@cR&6|sjLcB;k8pTo%|UvDN_Eb#cd)`6!-92#_)v)+||Nqk77 zb<==huq~|{XJ6B&_y0y&&(A)G679L*cXB=&byZ`wwH#ewuMSU&FJj{iF)qw_O}1`* z4H=1{pj|8YE`@%5#sJUk~99%rTKYa*6`i!R9P{nmy{4nV@)P8!U-jB={V%VEe!K zP?9nV3hPSH{pD2l;?;ceb?QW7s&yS}Mkv73P2-r#m^>)hB2Pb2N2VvIO5&d06KO}y zVWK;ZxH3Tu&*(PME1SjQpp>fHeu8G>VA#}?>m0wL!ptmiZaj+?81 z>5L@k>z3vwt}3)C@_NO!hg;#v>-o6P*%uZcwPhA1dMK53N9@#b7mX`w(c?uUwAy9E zw(2Ncs=gT_RsW%+K?}R1QAnN$vdf`m51a><}^Gj2-q0Z2UZ&`ebILGFLv&~+danD3x*J`u6 z#w?r}_#ez&_^I?-gd~JD))Ob)AE1>iO-GisRFs`lpwrFlc#fJEuDjiUee<+1$;yxn zZ$1Kx8{81bmNBR3q2TjK6I6>#;ebJ#kV`Sf=Q<1dA-9n%JFgL&Uq_Ip(NFN(kmus5 zZK`7Db9b$eHlJoWE=yTSm5FdaX^Oia4YMBQwgdVj9}$(?$C+5p8lp^2VTjvPB7Yfp zl!Gf61}!3ra}L73zh}WHRfigOF9L^eX-qUYR`jKNH@osX4*lsm$TJh>bp!Rd%57uT zYuJpR&-@aP-w=hb#>!y*hs``bxeC<}2y>tR!00VfeDvIZEKM<1oG71y^~=Zb`khxm-t-zuR<6d8 z>ptTt-<8mw^BJDYT*5ht(`e2-FX#`o<9QzKSLz6aE*nhX?YY)vA zmtHDmh5HwQ5 z@OPCgd-AtB@3)#y50&GA&&tV$ zl3f0{YazzvPe$d`k5K9O3Qo({usdK?(O(BwtdyxQj~BTeIK+!P zIw4YNIqGi^gZ|-QIB{4BCVrWU1~*m-e4VpMqoVO|^-O*=?=v2W4x^`se1{FuZWy>= z7QOg1mpcw96wovvOE}xqWSJp@3>{o~Rzv}=)z1@*6?X&)& 
zy&N<9b>Nhy34Pr$8kSm{)ARf0^81}{;8$b{9Q>orkB#vKXT3e#DrGTs5}1P#sfY2` z2NMeYqfp!<4`Xs&sGaB_-7-EGkBz;E171unmRHm{}2iC ztQJWnY@mZXwxQhH2k_Bj7LPW0jxE#t=#Kp@;F%vI)>?XsuLzyXz3RiTdBsz5PxK#? zE;isj8lT|mHdBl$6mmhC>*<*#f-|J*BdOo9og}-KLU&^h258MEnUA|f-VK01zHK6x zq9()ckp|2<;!-5KNW6~T|`G2)H^HV`w(3*VkBz@|gGxV!WR zQT$U3+5z=gJ#H!*PF{@Z5A)!qg#v7p9EFulTqO1HFp0~`V{LndS$Sk3u3cCMh57CR zOXxZ379GNz^fe^@o|&*OeFSTUO(2aWSrB0ufl0p%!T*x5Pcja{>GM{S-yLHxRwjzS z+4}*hiX$Hr@k@MajXudL3Z*xPwX(g{#ylY_kG^{!OvfZi&_R92A^mX+R%YknbQ}oM z^BzO1gfVW3jf6)d*73#Hw(x0*T_REGWziJz43G)TtC-NV5*jy#lOK}q;GeEbmmPG6 zo2C9RU`USWS-?-2wAzYyZ?fn5t%0c6>cVPQE+IZFgk7BW3exXfVr{LKw7J-ZX1uc# z4cIyk{1&`{J8qdU?ej0uM$=j3)Y3uxRH8hD9D0d$_VT>=`z1Ia9t&=L(sa{`Xvi3P z2}`o}z_rPa_+#I2swi~sz9w|B{f@!VENdaKYXe*z@K#X|9_~=5+22k>j{gB_+BFIy-2W1#CL>;3(2XLQ zo1!;{-XwDHLy{QwgB%V?g=cw@&^&|U*gGE}*pq_w%nBSn_bRGJw-AL@4CkWoo4hah z7iDsB+U1aF z91faudeQd$T{KLzg2skCG!M9pQ?g}YSx7VJJIm1M_(RNi>>Mz0aKI}&^m`45JEGsdrS z-f-ugDilT@gu|UyLMJZ{;?q-c=Zao()kK|Lai5Gw`~EO%g9dyTdmQcaQsLK+dh8B; zLA);%Vx5O9tk`@O{w+NXH~LM{IIo%wPD&>yH`TLWtxd2`UmCzqfo#0b@zbYpBE5Gf zm@kzV`CrZ!Z+n}Bn`JhLW94kfg6=}L>(4nXd_Ra5-g+Y*Kh21Z%Ku3Wnoi@puq!Ax zZKd_R#f4z07y_EZnsDBfML1^WH|Bd)_$}T$!xr|du%m-=1SX(Aj<)rKll~{!i|u1@ zisWy0^{qPC4QgdKQ{?H27iUPRsTM!fBSNPchp}d6Bq%O2h0_8n`iPtlTO`>C#_J{M z^wYb=A9`#_v0o{3-x-H~PimRZA8$A_d%3mFYkB@y_Y3x~)xulZ8eFa7EPLTu3|;c! 
zVjquAW(P_%z~PYS#0^C}cS(xOogE8tX2Q$8w_8u2f@tb$i|>mL z!Lg(?SUB+{rWR?SX{Rfm7)A)WHkBOfZp34u7cnpJGH7V{;L4_(urJwxD|94^%dOLh z{zz>|oxYbv4xIx>WphA=r;tT6+p%Z-V!?A(3Fpmblb4^H*gx-b=672X|4mX7vNrm( zeEm;SzF-bs-;jyxIt*drqYQkek%Bh1H^6FiHl8TR#bqSdb5WVr4JBG*<1KBzUG1>;*LaXhwutDCHW#|q8tLSV9oKy)HN4PP6C3~oO zb`EBDL}Fk2Y-TSZ4GpuZF>#h9H95MK+>f#0SyNQFM<2uS_s@}tj^X&=+-SV0VurQr z>`@N~(_zAlpflqXQ?2cV$dW>`KTw2I3a^O1-0CL|Ce3)VqlR6L>JbgOHjHMd zYw(-&2QuT!am|Nb$hGSSf8Q<8&-Ow=S23)rtHUR9&4NGrE7|iw2KO&EBj@!d6VK60 zAf@^PTkR%Kq-w@PiGLltW|{#m+YYm_55q(bskvlybPAJe&mu9G3rL8(9DTY>m?L){ zfEnKxux1Na#U*mpGYM-eE6_CLC;a*uDt@MVN&KYF7&8Q(uI^<9)l}W@x7@WR|C!5SKOgiCU#2 z$xM|(XPXx!?O+OoH)}zE;U?gxhx4`8HA3I?Ip*xr7X4>2g?r#FqPuT5+3dQ7EnT@3 z!*fzZDC93Dd>lseV;vwuUySc0oIu9?E);A_fX5rgLXp2RnH^=sLrgAU_|IgFo2CXc zTNRloaS*IaDS+^_KWKUEA)BfG0M?Kc!6|S})H6C9-sNj?O+6V%Jy(kZM-F7C9sh|f zA0&`~h5>xVpiJVWR|(PMr2wUb-#%5uDlJ0@*`h8g*!vVF?udijhU=vK*m1#&neMkB`a~`QD1j`0ju$-zn9?Ztj{PZi%iUpN|WDrQp}BInRvEShJVC_UmLi z)tbcdtpN&|Y|@o{1`}g`kYefU;PUJ?G)(Eig?hc>V*N>EmPWm3S+N5z9jyc5T4O*t zJOk&t3hQXo2o@F=%kN8%hsVMkHa}nxI%uTg(!cHKkw2Lm3UdnEIi?ul!=d+cA>N;t z!wSx+@cOF} z8{}L3!O&aJK$DyS7rcauK~7ZS`a;-mw}H1U9E{qt6nXxM78rP=9ZOxlBb30}e$Z&C6jL*=K>(`te!@&fB^V zoNCWu(`Y;TUf+;wxy*vkHTTdde;G_yxdPY5+o50DP4M2dfE*NZLkU0B>CpqL(IeD{ zu96F7Mmpu}`J-s&_)3i%Uoaz?M$(Yxlgt|RMq|Of#}zd&4qlusC6?X)@N2pQ_3YLM z>1=;^8Qc$xw?#lgem?9S$5793INjlY3fuD}`2fup9R6J$ANgre|>HMmZN-^G-wqD<9^|J9@ICDn))&}X2@ljCUV71r}N0F zQy~!Fuf~HM&a(re!>D*{Dl8opLXVtVNndFP37IQB`uy7^9N#*c@2S~><(G3n?QRKQ zP;>#6daSt3H!m8n$Dv}#j&eNk{D`S)_;yAf7?=jK$scu~Q(K9rTo})j_ZC6pojBZX9tU=hs&I+N zVwxE~3&KOciS*lwAiVA}Zg)2&3QvZ@_n!YSyL2I}@(Jb|rRwy=!!R`e&zX1FZlqmp zYTUaoi8%=8?Sr{i#MV9-)VTy#i%n+cLp1nD`;Vl?Z4CII{)tV)hM>CO9hr7<8_ymy zgXS9vcUOZ8>7a>g`Ii$z=&jC0bh`2(7}4Z{cLWEd>&T5fAjnMUv>KBJ4Jn?R`$eqp zeTP`t+VH<#uW{wUa`-W81mAPu6kK!NOU{pRCo9(kV7~TL>^-mm?E;m#`}RHf&1VmU zy>}p{2NK|_Q8KvQIR(qgH`BT58Tfd&1^qqIfVX-oQLC9!)Y-|BMz8*fgU&ecS#4gt z@WcRgUlRct|1PsXqZVM3@R>ei=JBGd0tVNo3(}6x;PH<0>6D^2yzu%qn>cs~F38`6 
zx6Fl(&Yd*&oj&&KW*ByRjm1q-r&#IDHXJ-t#_G)zDSD{> z2$Y^2hk7ZFT;_`jR^?jYc0(`hTvQ-__*jMS+*48U{@f9?QWdWMc72L!k##LB73;qVM_>!DZ-8G|-BsNAq{l7%dHY=+>U{N5Af{K5uhSZI*`B z9~0nHuQq>oy^aLl_T{6R?74D}_DV z-UGCjkB1fsft}@)#$yH>V~GjH6pJuuXqIBC-J>w5OrOp54g^=HRO;ZFLtmGMuwNyv z#Jl4J%bB~4m4=>Y>Eb11m|rnFZmbEvguTMKf>pff!zj$2bOGejf*~lk9N2kB(e>sI z2qTVcv-S_L-&zCar3W!+js)D>pUgaDUAbha8Lr-V9b3Y0L8+`hTx)rc%VIN7Vbm~w zajmeQ*?yenIL%`@MCcx(z&kNgLbxfP69bI9B)Ps6hNMb0IQuuZa*7!DUQ;iE?qd4Z)~a7rE@ zW`yIRCSgXpPvEk(DU&|cx8&&8RtQgC{+cg`C-9@dl||$ zN#dD*H*hBofW+$o?BnG%qLs>{t*w`JgV!+&hz^X!?@x}319FGZ{i91ztH_V7oV5wJ zHy_3=qKhP>{V`Ss+yx!eVRU`)R#w#`!R0(>!*;i?Z26zTV4Lv=J1srIDli+q)2*mC zJ_)W)HlQ*Sx<%f_eeBIF6*|dXMab7g!1O7HP&@J|oLJ@vzofpg$rP;jG%6x>~0zPD^N zym-8Uh!wQBjpALnH+{XxyM7)jkC_EVWecF9n_yF+n(%j}_=$NN={i(L_6Rcp75K$= z`JX3e;&KiQeb1mBt6alX^H^PrOfkdlc8)v98CMh0@F&%;r`Hcv}avC7(6D8K2 z>!8+U6PZ3Y5HlWAma$?D`4V&)W!Fg4$5Jbi&7VvIhW3*Jt#YF9wHEW8OQF{}50%Eb zLXS>5@RfmNN(+$L5sBo;J~Ql3%EMJMudsB;RyHg68Bxja!hwg)`Pg#{(eY?HnC~}0 z-5GCK@VS|mi6bJPz_zW?>}X2>OH-N!)eDDX z&XTw2buEsK>W_!^?l@dH>2F2tcp6G1*y>_+h#;L4FFCX4t~sEpuU${~|c2CPRN;%V1^CPO&)mk#OtI zdiZVS24(p<3A{0IIYgRCz(dE+-TBdXdY18-$w zS(ngzsnB@IRz~^~$?CbFQ9KkjZ;i%BGcDP381W#3P+_2ULnEM0=vr88j5tq5F{sA&CnDv})^zeB`IQ*t55o+cFEU`+$Hpkw$l z=x>Q-A%58~Pb-;JJ4lFQg3>TBY!YNXFN0}0MR2;|3~sHQ!;1X&q53OzeD>>sIJx*R zv`Q@9AZKeyn%9k+1Dh=F9*0ZsbifCnvK z7cH`Amjtrd7Bq_FnCJYz?EZIcs(Gyi%$yxa;l6m>5c8I-G*E?HWeJ+~+!#{FC6Vn- zyMVm8Ah0p)(9Yiv^IoOnpNnqdbrz=}KllnRFjqpOyY?j8B$S-;EGAmk|B*@kG3-@# z82b`=3+^yWXb|R%Q&vC40NoEzx9k?4FL%VHS@L|lYBE`R{45TS>cERT69m@6N4y{% z4olbiLhe09A(KB4S~h#Y#Tz5QRIggRov&eR@(dU<g`x*|Ta zOpEVSaAG?*Ji}d**{pW=2lRL>~TbvUQoY{bvon762E2mSfUEu zbd_kGR5DT5^WqyqPr;DICj9n`Xm&E$0QC!`AV4z}E7j{U%y0mE|J4oJwO@-iM*kwt z3l5{oupC_Wun5e)WQi@CJ;~W^Q()+pbJjVd1O@NJm&CsJEO_5i=GjI8@Vqn*$7pYb zc~_k=%2XGx56l9Gvo0d7Q8&TKwurbG1@h@vB&fK>4sW^Z(w|J4N$*o(-kIj;)3F^t zYP z>RHq|Uqx0tZ$iG|4SaDq#;i)q*-e!O@jDG8RGY0SoE5X-P@D_TFO)(KIhy&dX8|MC_@sz0xRd3Bx*0acH@pjC+O;r1K?~&_{ER)5g!Us6x#lI@tLXPOCbB`9qcX8)iTY_C<1A 
ztyq|6>CC%sna~yXEud^R34f)eh`YRmqJ=L|X2E3o#PJG`d16Xe zU(knPBZpI|vfFIbfoz;Ic0b>ysK8xKr0BQ87JvvNt~aQcMOYMydVJFH)AVp`rjhi{ z?3EC>?;uUFE(8^;nfy`tI@+8Y%a0tINkb30)1?a}Xu-?b)VcT+>enwqAOExT*MZwu z&uqkd?n%)-1~y#xXgUrsY7<=#FrtzdZov-IFwtAf_gJuBpBqdK0SR4WlAb<-P6&H| zn&~&;a_VnXxU~sl!z!WNWg08Ev4m~C9Sxt`YlU1-ASCW~=IsVYA=C5{(zk*?;rMd8 zUCo%AU)cc>rXy(Bt~Sy4T3L4U*;ssa_AJUB4d#j8bRk~YoBp@{8TS5ggZP4ZFzMM~ z{>VR%E(%=&;w$;|z<;q^eRv}{TJI7`?paDh_6WHvQv<$7L64t$mLSrY0BrCVdvLZt zfm?#)sGsdmJW;02Yt^)9dq50d91m1WZ71y1pIV;OA0?7*yon!`7vj@FaeORBg1zcR z@Uc0AM~lx8`SY6eN1?veFY{>nS2+#;{hdkmsz&l5gFXWnc=`XLGQdmdfLU4QvQm7H z8HTEGOS~Vl-cQAy@5J!xuq2J^)8R={Qhd_ebg(}c!gq874SDW@O9lqxkN6K2Zx$U! zi;6+GGy5jWO|*g@;kW-%@Vah2m4LTt&_lwGyefI=JJ^P`syfUF@xR8>CV#(bT>RAIZI9tD+V8cA+Em z-!^TUAg~79KL*1j8)L!g`G+lym!p9nl5u!>87a~-rZurCU?MP#8vb2n+U1dam+LW% zvy|o=Dh%mQ;cPViU@=q+o|B#q3Gs^+arAO{B<)fhPpeh`id6J-;kNEqP+gq^US}?l zpEI*?%swrcFuMnz$GG!9^ZLPN#169Cwg`&0rNGf;KIFOXD@eUC6((qk8T(xY!lQ{1 zf9A0ElM&3tWI9hSl0|j(R+OJd$Ov~ISn$yYCvE+NHx})wh>M>R{f32BLo@2mZW12O=YciR@Jl|s+y)?2WyjmO8q;W$D3Sa5jG2D3?Wm^|Pob6p*c(=Ryy zbi=9Mz<+7poLGz)Zl zWJvf+f$hC2mZ&~asCYT04tj+O;|G zd4aiB1U}MckZ1XjXrMl2ahD)z&~SP!*B<_z^uwTjdHm zL9&lTX&<7|!|)e7RZ=fro&42mcvcQP*%n$+gwiBhwGt&wLtr3RrsuCOWjjbJ-ZIN( zyHwMde5nam96AWocDyG&TRTKs3;sf>)CzLUWP~Wm$&V;bO@!O+L-_eY_86Hoh&l)O z<1d*782Rsta2jb~Z!UMR;FMPKu6P)E5YRwsGmepVt0+{nyowt0 z(@-ht2zcepM5kfHF?}Hth3Nyud9InH>sJb!?Q{ZW=f=TMB_nDY@|$&TkwK|rWpLnp z8e5_=KybhzIJ{KCzQSx$Y1>WS-02kSjJpdqB`Vxzy%FZU$za~O2<01V!698A#Cx8z z8b3R7O0SWvRTJSv@6}{T{WP%qZ46;&rBHeB9_aWt3&$Rm0z2WH^?6c1+4ElD@>grp z)G&K?O2ri-i`%RlN6!amBQ2iz{2S5PU?sSs1z45Pcfp_}Tp8Pd`mH00vFcOi6qPG( zidlpWg%;2|e<@R(_mId~q#=5mu`|nyu+%L=U?WWjyVE0}&UQ3Eqkb9|&kqNstrqxY z{tc#8Qw9^}C%}v@XFl*v7_=Nzq2GV~g^pXBaN8mk{8V=W9*vpL>kbq{g1;}EJ8BQo z0&Qrj??ZTy>V}JUML_E-SCu`-!*Yl=hyj~8Q z{<0Gv2-)eg=LgdP!7cds_ELPA$iaU9Me?pbnJX^yz^CgLLehj9avcQ@{*MT>7o3{T z*-x?k=m{1poT$ck!KpnRoTU%kLwGWh=B7{EEawu#$NGA4~i7J04Vv#Td5h4IY)7Q-DQk}5&zr@3mF9GM3V4WfX^F8(C$HNn%j_0NDM1BYArbv^6 
zjfyC$cL!0>V{%GkG}=84hZ#{fm`r}8IG`#Q4%j?`8&)5*SI38kVlCL8d~Lo@NvIp?v^X&38z_nthk zmgbLqpNgisy(X;%7Y3H|)Ui#ZzEydN-L<5C>Y< z!*Pm3J!m#q!LKeSm@IskpPtI|C;z#U3$+^b@P}vO@oPt8()Np_r>_K_L^wkA~y#00@X}g??&+CrB?jr%TV`m}q z^@ri-%pVZKHL!ArB>%M0K{WGU0Q?6#$+J%dDBdn+jg?zP=c?|(-nv03eau<(*LEB+ zjCv=^H#D<)s1}VYL_OlaTP%1^ojoR>8^NY3zh{~D32eeJRgf+GMnX6AL4kq|&IvKZ zw9*K8o!$ny;)9Up)+f?RT?(Vmn((Zh31BBz$>!|(&8mWqi!uy0k$-ojxbGi1=nb>S zzg`Q;tDnc9ZvSYQlqt(}+X0k90w1mTt?udH}Uf?h{N0_eqL*R;(fzc9a^wD;P znMWRoWaInDhbP0(1@y@;r`hPHtxoH`ZG|~$Bwigy;K}+i+{jG@{sasACNi6Cr~0U( z%%P%Y56=H?4%SxDu=}Spt#5{7fw1 z%}HB4-Zu=++)&1i)&|f$U>}BT=^!PS3PCq=F6_SLjvKumy+fRyLCH)q^YCS^YN(QjwG!{ZS4zs&kqDZV}4tyB9 z1I>FM!{yLVct>U)(|Uh`Sa@H?nQjtv;jjd_GE9W)U9EA&SfZ@@pV6QLt^A{r0K zAkhNXF>OaBN+dmngEI!ft?9x(zL${YhLe#kdQNKBw88g@kuV+q zgV)kyK`&$)_5F7h5Slyz7c_izC!PMc@(Z$p+lF?(Fdc1;Uq^t4La`% z{;#i%%;wE^SUUD9%nQBBq~=xQw#~69_Fq9z95UmL8z+tRC+@u4H$f zeW3%ujC2Q>mvkF13ca$ZMb_x2+=+%=0Z=g{l{=h` zAy@0d=*`pe(=>`N~kfVC*Mx<@(|Hqc55*Ez^UxvNmuP9JNxNNpzO7 zG=G_Jh-NATaruxM&^+GCBJHNpX+0xgPRl9mUB8UKzLz9AEVrGt4t{_`>eK`eZ337c zOXCghy_hp(AI%hJ3ti5EwD;do#E|3o;piOfY`4ai&|H!35(oaa*B(C?oMjm_9<)w% zu&}SsVNLlul#TbI+NJVXpE8oR4YlSQ4enBYS=UL7i*e5ir)OyCNR5PxPDF; zQB>_04Vcc@M~OVh(mw;mMP4lPw>7NXvyhQILmV*R1WwOW#2Y5FxXmwpVILSmG8T@f z+sECn__C!FMmgAF^|n;_Jlj^BbzT<7dB<8i>fNrWYJFuL=2L{Hk53d_lW&OiUvs`i zLkvT|EQIr}IS{jA8;`1dEqbWc1B>5C(KWS4*-e=vEMdl9SU>GP+^;=|k$1L%SJ!vu zHRB(6H3~Ttoy9z2k}+L-%9N=69s(;0M>AiM9emn5ica4-4i~lr1K0F`u7C|7<D0pAVE$8y>ZI*qX~@t-oiy5>DQR^={wUiS?a zp1lUQJL<@WVZ-Rgh1ynSXReb@HBAhY8Ne^Qy<|(J>%oXv!jvbG!da&iFDhk-*9d30 z?vz1vsa`fD1wJMZLyB08tC=XNeH6E>k3`KN5zbN9LkpGl*zhR@hle(!;mtOv7Bb;F zkAA|stX{YgqmNaG+nDnH<+k1Aq4j-GX0%;JwQdqXr+rjmLChUZ*tt?R|oV zpkx8Jgl&DJ7iUUfCC9)V?;nxvk22=T8edGQUC?7We~gV7$kqGgtW?ypsaTWZ~TnIx$C~N@-<}`{Pwfx!Am#T zV08zU{ZG+(_*415aa?3YBxRM6LWNMtIQR7^?MMTq5+$klwrEO`nN1m`GD|W-B%J%Y zLqbxNtfEAFptR@j`TYgQ>zv~}_kCTT&-;D87^C$Q$%1DxP@B3QZ&(IHMde?jn6C>a z+2!!8=DI~(xgX@j3+XdWp4oUcfrQm((QaA3&$rJWx6AuMSEdw1_Umy+7iQukTOCq6 
zRfG$1l*1vZWD;67L=QhaPh|Mlkm*myiyQNC+3;E@Yb%6*qciY~`YAAR5{0)J1;Vp$ zZ_?urf{2b!DeOApUb#fA3-w<7C7SsR4Qj4Ix9PXaC5sNg`*suF=hgyBCZBQmekn~H zv;?jt0Q&k($e4hw5UTtNG$0LTzb~UJZf2l!zc}YQy%oii$77~#7g=B40F{z6D>eEn z1(x%_lTIl^+~?LUczQ1k7r9&kg#snex*~~Bc1%F8n~`w0*BCozH$b}AF>stz2#@(r z^=O~}ASPipO8}A+1iIj{NL_0KM+nV=n%x%pN8klZs5C^c6!I9302H% zF=k#oT;$A2+xvWY{M--5@O|_EbAMwj+o*UZOs?D0$Va?New zaUQ~ZCN>a1URpr@OiluUK|fuYd>GZN_&y-S(^9EM^m*)$?0g+``^<#Qhg(6k#R_&D z&xe4;X3TDI6}Nib6L8f(k6jNOSinSnzW96-OqCghPnSO@U*!10pe(BhV${6(+>1qUiKU1il||bw&p`Y*@iWzX(y#aF+Kb9s|Sb>+HvrRJ;OW zTx9Asq3=*M)!iG0+w@M8qO@B4o$pIFnnns+9U=s~BZjfv<0jhv^oO#ymKfXQ4G&&~ z3)CtOVDAlo>{%OwDV_1eR#cSB6>*38<@0$TRSJDpUP~9bUIpf&fXm&g@M=sFSScmI zct>lT-rx*s_G0ulzqilGFD2UJ^5E8`FHn;#BJ`*zfXw7!) zW=3M<>tKAYZqD=6wIF%$BTW@8Cbws-!S1JDsGma_1g(Dn3$99mZ`(L3|7jAW4$dP# zbz)G`!wG!0M?u&TWnp3L0O;|ajstt0u|%O0Usu+k_v*2n*V11kK{|^3J6Qmc|1RPB z>jfBECd>bx$>NH>biRX-jXJ%pbfE7sZGK&ff;nR`uh9xlXf_j>;$U3%PZG3@_kg~& z6W!GLoS$>)aBE#!$Z9PqCi`^;Xv=Ua$QY#iC~D>EIwj;n)rHDCJ5NyU;HQx6u?9Ze8jArp zs_@ti1_itG1ZfYJ@t;#NtiCQLh*tPPL#Ld=X8JFX|joqeniEH&8jjjcFfA0VGTK4GBp5R-?D5arMpD9eM`gQom`u4pT+LNw z3l^Be;{NlOI>t0fy`J`LKZZ5ZJda?@67Hqicd9z-7|MOA6E<7Rv%K;) zuuaZ_kQb+g7QCxVCMp!G ze&C-!;WS)tvzMmxy9u`8Hn75lRH)TQ^a^6>x0ZIxYxNUhMUEGdT^LO4 z>VD=Y~tb`2~qSGB%YMTCb=p!YT5|Cze;HEff(=#wpyXn>Knb8 zE8js2RMH{v%y^h|E|m_(9R<2qjH^5l2_L7NwH({L9Ay6}pk9X=dAdItH)(Ff^fR-# z3xXlk-S-UWvN-y+B81NZJb>=O0%1?Y2MEZWK&@@!&_61QCT$IeT9xZyJi>t)d+=xC znHnHc5Qx)k>&X9^u}2p6p@YjUP>uJceQpjse{YVkTBA;I^hq(#ACQDcUuBqw{eLVW z-pn#jYzHO;8sOf|UxWvjI|#0>K0pT+Ey1o;Fn`lqft12+@Xw#l zqKj0y5qfqIvL=?fx)cl6p58|Pn*0&&n*55+IdB4Btq{SkllrjSpa_!u{@|9*ZaUja zpYJ_a0+89FTr%4iE;j&GOQ1MNRB8z5FYOnWg~BE z!Y`hcGw)P4gva*?g32FZhol@=TWVkVOY0*p;hiYMRSLu_<_B?kP=hv3sxV7O9?fm* z>1{_dby>JCbG#(EvRE=0ne$)phv!LVI$9uVSQJ|Gu=XQwxPF+E?=8UW-mPnYQk>P z^XnLxY#hywj5XtIPS@hQr;O|Ibz`?QBFLomw?Xkg9-H@xC&EV>!_E^saaBzfj7X}$ zCjmczD-&mjV-G=R*jnaQ^a`VM{=&MEL2SM15%>_93R9l_D{lflHZ&~Ftf$A3j|H_j zx=)HKs@4(d!+QlH9mnzKCRJ{E{!#ofyo8fg`io~@6~K+xzG%LJcRO23a&I&}@L+ck 
zUd+4!&tqmW>$U(gdxaF|F#RKq&j`fcRoYOPvSPpHU*kU$-pTq|9X7Sh!o?HQNbuaz zWPANo?sHKjDH>SHQZ>a{{uNc2`ELd9N!>}}A}Z;5o9pPl`Vn=Tv=)CE%_YT|`|KEGDzZk!PSp()av%)?jT7Eaac*)A{UlVc{CAjpVy#Izu3A8P7R+fh5O5s*9bDimw^!sQ}c{JUX3 z{{9_K13zc-&yzY~ZtZ{NmcM=2>FyYO-DfKB=lS2_^Y&BM8Li}I?sF8WKMSFcB)H5Y z6S;z0%ji(&F!`)qE6lf!2mj|5q->R0rF(>siiwDl+wTI=byYCB7aW6Gm-RVEiPp*! z?Y&?$ZWC0Sw8Q4QD6EyMfCDdMz_Y;{E=7K$^LOtA%g0%`yq52#{ghzcog;9`1xfhK z=W^3W+AxRa>sZ&oKhvK7hW6}ESibrM9WmyVu-twXH@e6LbZh>?PkGI8C7&4`4hjunU3$+DqjQZKD^S^a#kRBL3!JO3aH#B%UzugV7TIf z@IR}3LFaznWpelkZ5M>n#^Kw7gWr}Beajs5v{E5qJ8r?>Dt^A)uZ+Vo*CDh07nqIj z7B=OlqpqGPhIeeC+I+q>&;BLq^=p%(MS;X*+9_8 zqjK1=@JLgb;A!e0ICdtYiscwEo4Q4KfbTdpiECrsTU8vKGM1}TD%J%QmD21C})6$!qdiHwA)5#NfHFnwa(@7ZaMl z6TkjCI5**-u&5`R=A7-o@^?E05zE(scO&oFZu?2z!_>4 zdI&iif)2x8u*!ZNTtBD)51iNF3yb>%4ab7W^mnvRa}-=3H-xnX5}ZE&8qR8QRNZn7 zD>M?tbqli5C^dw}%uvOv>jsEKyBJ}Y!a;MbBCGmfipCF5fR~X6Ou3kcA!0XBue6Xp z4@`k>GZ{$Tt;bwbltF9FYmWS)}72*s;)U z!JMt%JCKR67aqB@k=c`2>TNs<{@!*1`C(OX^5j_4*4gAzdlnfx|1E-U8y)IhiK_h> zR%*k~E&m#k*?(-6OpRo(~ve882TM_P9ei2qqZ^EBp0lXXZ zG(^47gAfKoIU(O%BCR!d=BlP`|H$pQp6KNS=3e*fbsI zUyFcFFfrF{ie5Q9Th)#Np8zNH3smnT) z@s#5z zx-QS?T2XcMg1KH(21su|Nh%yd$lre# zah!}c>{Y*qCIV^Pqv-?v{s@wJ=G6a}B->EhC%kdo4+W5x zG{kPD$#C&n2)VCvh1UHGqPJ%6rHi9=ICbSS)TpzIRGU`g-sAB&w4#J&y-@_eE(Me_ z3xP@DzXcxg^RP>+mdf1T3)fSuiQ=^@u-$(y?YzhLcy8RKt0smEwK!?=;<*kUI6oO? 
zZJI&rUgV7`4;(rxDAZX?2QNIUe3+umoyjUA_UWr2+fBf( z1fzp*Xal7vuT4bI~SHI|n^G9C`&u(u+ zx7Zw*qc6i%D}{h?|2CFAWd+(k+Q$NQBk3XUt*EK;T4?I{4^Jt@Va_ZWHsf_Zjc;|v zl?x`boVD*@bb}iG+IL>C{NGVlbaEW~v|}W*SsjD}1vTW#AuaN5nKQE~-cE09lxI&mRB+YM16uHCG1ocf z2y;pegNM~k_&98Uu-}2g*e|!p!BL%%+Wm*jT~-RCZ*}6)!_u&4)C1hElLS3ZL0tNu za6y!rIcMjr2j9hR;Uk%PBB6O1B1{^=TbL-^^J)d|Ps_lkA4?#EZbsEx?o4spNH+Id zhoH&M6vv$WKqo8;<78$evTx#dptkS{%?x~m_6Jq)lw~+87}`X7MCkQ@ee5CB+C|M>XMw zMp1ZH7KOt$dsuw_L~yRx!))6ja!B_lnJc$I5E|0X{H-qv-mZ~?8Qo{GXMHuEG~Uj> z4XLtCIVJeAr;%(w(}kn+W4P@SLe4sBIUCb>6(xVIhT^a&c3g(vn6R0@uvg2X?SaY9p--f zL*i_V=p)4>Xyo^+3%n-7M5Q$Bc({Rco+`y|E7ZWXW%JnRi+;Ror3$M?He&6jGc*}4 zfc>owqH%I1&bxaUe8fC(<<@SR@mGtp@V)4ardr+Wq98McLL)fJ@ zK~(A+YGp2q6775sGx{XVZ|I>i(?)a7t>K`|d%H&E5WLoV7UMSv$y||AjCkZkR30!m znUPC;I_I%7?))7r^$}e2iKHhM%R=qJGID-_Di{|uK>I8Y8f$+5)c+f6x%0R;3fe>1 z=Vy}aT+*=RS(j;$W&8@Z9@7C)$GbE+b|L3uvJsZ6$-?Kwd=R_o4Ek3Fkub4t0S<0u zE{*SqNnJYT82u51O9cpm@>b#F9Cfz+r2^V+86xc`=5m>?qA(P$Niz~PI7^lDP~o^n z@UA-^+IhdRSKl}K+M3UNWhGezrY}3% zXw#xR{JMG)EuC+LdKWxdikKqO_wD4T!5A*d*ny6p6OX03A933xO`>sIl}_Qi;w`~< z;AO=^+WR&adJPNVShXD56~=?3j0_9pUXg0!1z2$A7hW<7LEaX^rEgh{4Ut>nKb18g z?I^(|4qBk#D1uY^b>8#jh{mrfP->4o-to|2!T&n2IfiHK>+Xj>wN~NKz;BELBepDf zAziB%N%}6%<%-qr!D{)fB*Y_J@NCEw)@|)4v6iRdi}Nh#f2EAqk|JQ|jYNFX_lLjF z7;;A{b}{pve&C_x3yYthfonFp&~LmMwjPK?i~FgVcv+Rr{%-~;b9jb>ha537w;O}1 z3Lqh8D*rjxlcj1)aa`X)G?^HV%VS4!M-t@7!Zv$w-T0o*uZl3I@)lHoG8T^N-vhnT zbMUT61H_r<;KvhN1i1=6(8lwfF5B4Q0gG12L zHmtII8=Kj2|Zql&=_$Z=9x*tCGQE`%LNIziYhve88+2|Ag)@@ZgwsyALD_~2P+M0)I!^tgwvENGc7M6W*e-u~aL|>G z;rzjO|9)s`;&Y+_Gx7ST7ZBHciR@Kp*cK9nHlu=}r_&XbM#cz(_&npP1`UYr7y!3z z?_jU~SZ>Ub0Q~9tlH@KnVrTYfaDji^>1>0|*sE`P{i=Zk)Os(3vk%5Y^yLH?oZtaF z)#G8q_8!=#n+X3rKME5w+Q`47&GZjzhMG;6=p3I~SWy{^>vl(iv0AP$YilD!^Ru?Y z*}*8DYldyPF@jcS{v3F&4W68v#66aqLiPp+W0jNv{w){b-m3nj#etU4x$gsn^WDu6 zP6GU5$LE*>&SEv61-d(Y87IF?Co;iERi^(YQE!EK{j?TWzoQc*YfW&f`BZi_VkFEv-cH*DFGc^YW?WRyi|6NF4>?ftV-@wD- zB|Ng7z&sQWSo9*O=%qc^l9+Av2BC~pC7r1has{VcRX-p;eRr)b@4^geCjo% 
zyG_Gc-Bs8QqagbJOtAethZ|Zuo$2g7j;@{8pvEr+UZvb*8XqjN@p(M0TX`R>^;ScO zX{kUpax~l=3PH2+H_7bJ$3SdtCb3v@n0hy?!N-SoVgKxIn6gtGKYl#|9q(ePc!UBi zdUz9!!}T!4ejZ(S$c|R+&Be#r%dzF&8pyx(yt1U4-*Y<*!&kfmi9v_Su|rF+y*m;% zj$a6K6qV^WOe7blej?89x3T%#eHaj14v*)i3Lo#zqRG-8c(#AoGOA-byZ(spU)?W- z<A<%!XCU^MIP12W4qsjF zgWydWSB2K`2kM!GT^FzHu71{W;|FX3ZCleI8hM9%w4}wZ_yAKzx_J> zn|&Ty-IlO~@GQLO0GyTCWBBrXKTSHUgl{!>fy6^aVcd-fbnqF$Wv*0%my-+e?SUtl zpB)7n9`C?uNde57aRncE+`!g>6?985k|i;@!gnWBaCKP`oG_E*5-0HPi$|7JK_(Vq z%2u{B+Zm^NHjyuW`B*1sh*GAR>|JX-{x{VM=I+Xct8G(}3=#bA`BU@NxmNhizZeUq z^X%tFby|CMC7a`Jz=AY}Xy+Gp#gBkYWf$#eS|YH&u#lU^ z=bV@F{Ww3Kf8;0j1X3<~psV#Fo{eLJy(4a7y=)!MDLBfm{_7^|O2lB)%lT;iFmK<|fSE z6O7-B%dw+z8YXyqY? z69-gA^8n9)3ZB=U<%U)`-t%=;QF35g#3JI?mI67)OZYu~x6tO{McldiFg}`mOX#n0AI)B*;IUWF z;6~32GIa^hehCUkW7Bf#Ix8A8rxk!w^hopy8X!$JS1qe1e*r6FM{4m`5%RWH!$y5X z`F$LW)7b}!``(hq9WuDn>b#(M*c*p8#Dm)#Nr=BZ4bzg9;EDEPFg!L9Wv2YZPxmk2 zf!&iCo5QLfjYihii4YPk(Wtdv8!PLntq(m zZS_#6EuC>d_bB77P$z6$Gn!@nt`qJ&be3$%ISMvs)!7PrBu*9I@NjrNhFLFyP910V zZtEC$Iur`U|CBNL5R--J^Jw_&rm8D375Ve{hgu0H$#efp)D$+|-5e4)i13s=WTy?xy4 zCtn4!T3ZQSVE`B3j3?HU{kX?%JHbFooPMt2=%ijFI9{sFGA{xjDYEU z4`W^LB;ojGZG3Vr00RA&f~rOWER~64ukOfl6K?;YT@mdhMj?*YecH(sMq9GuPyUi- z8BP4-WWnCw`vG1p3%K37{CoY(0+-o^!(S0w)|FU5KdFUsnwN8MNOMz#QPp9Vb>dg7e5DgX6V3{%LoSlUM7bh~FS=GKIrvrwB4ria?e1~i* zJ8M08zF3W&S*wVTf;lXU8Aa#cO2FrQU-bC(l`t?OpH3B%w_vW{`I&+{-kCL?Ei;jY z)_up|Wy@XaF>VSLY^gzqa%pb1^HrhZqXP8RyaLNd1Z{PS!B=Vo3@~F^dk6>lPUP2>%ik@k!TeNSkfL@2xcjZx_wy25T&2GqhRkb88jAV&U?2aTY{(LYz{~fjYT{!w zeConoZi=LfY9hF6ZLattvJF^zAzBVeKuzjLB1nz{y$cbRj+gnI(wIv)7*hn!J-dj& zzYNx;bb-eoF*c_x1y+5mU^_pkb6TyBZ~~u~_e>}UBkLDr!h8kB$-l>)V^4$lVp;rm zM4j)5-=*%Bo;d4>I!nzv!Br}cLWeE(F#ErFyzAqId5^rYz~e6(w@yS`okd`Lxe_Nx zDzYsZ`Dk)(GC$AEhYyn~q4a$}*_yur2e?mgFSM44q{@*|)gP!)L?oQc{44l+b%r2q zZzs-c$ih?WPLexIMcjmuFml>glJnu2@L*XiRg!nbInQrmK)oUU;ys$%-z33| z8wSXn)Np3{EI7h2q}>PKT&*UF~$hFzj_X~@HLIJ9sg%wKX90?yl0+Xfq4DY1_}vi%KRg&Ww8*bJQL z-$b8p%CJZ`9}QwMom5SnKhG*eW32o&s17&ejM~nSA1gQG{`^L=)$bCl=kFVBYYn)B 
z>Qa1BDu$kcdqLiY?~Rii!u-xK=*}Pvz0SdUZBcseg(T?O>OzXqAni4IkAnCu)Ml47 z_x|Y>xVqhvg~hEPtxGvvqFVqd=4wz?H-a_nh`_8EF<5janOx*ExewJFsIz}2G|!#E zKP%5c(XcPNwW$F46^%EqH47g_bwb?hsVw{MFZytYF*a%%N{hFzIU`nl9>txb-zqsojJV58s2&gAz>j;WSqKI)f-(n#~T* z)`W>2P2}h9_oOpwJiP+sdFV9ki$2A51eEL;gq?1;se@M|NqAjP4=Y zekC?)m-8&y5WsLJ6t}X+aP4rC`z@AC9eoyM3$D@eX303p>;!T0JW2m^xx@DiE>bNC z5%@ElgmV|+?sK;8^Pr9|O(MG;PrF9(0+Y>-?l z4+EOJaFeA7PFo|*KJ?VmFZ{V{O-(pjmByjzM2=IFldP<4nuEJjuAnBLnO;*V&CM+g z!P8G)5ChvE(7Z4P@)p$)Q^y%_h`%!!Zx+E1+wZ|BL%svVXH(h^l+ZCce17O`2qbKg zqxZ%&(J8k~pnECbJ10T(NZ&wZu8|CQo2z5@Qh7G!%M*zCvjE1oxZy-2InH`uHrF)z zG;AyIK#47~czZAwHdkrFtl!?1DE%51mPHF2<~!48>l5JPt4v6@Frz^!oiyjD1bn|{ zgvriLr8^(Da-@WCoyl#_s|3}lm+16^ z8f+r(w$z%x0<=VoNYHLIn4X!&RtUa>zS%LnZWYSpkLS`8#*1-Fk35$)B@W5@3#8`z z2yR09I(VXP&#j)W#;Tue$BWB@;g|0i-2G<@Q}$d>d@3tpt=35pR7-G6hGNK|UNDYr zsYSa?Idpex>`(#vf%X2oXWq}n&+%{OHNIpt)wfim}HUoENi?7?WGiOA+H!;%DT zJbUgYwUOG5!rn0Qm*?1~?v_J;>0K!D{1j9k?FPT&_xYV=Wo6i7K3}&ZA5>ZjgsE3P z3pW1y43-00G`jW#d>B}Ud#<+<+m9pRpH2{Z%P$a~eyonoKi-f%b5D|$2UZd#k=5Mu zUxV;Er2-ELcA(DEVQM?vjYH0+l`C(LfUve@Aa+=n`&a%FlZw~TS-A4>JouG zt`(ZxkC9)$8o{Zv29^Fh4j(q~cc$waXd3bXN|Js-xLq;k4~@dOxKXI&DrA3TGe~gS zNUm#73*oG4Nc13OoxagjvrYw+rpR;0GH(k-R&@%(6BrFhEy0@2o;9Ow4%OrJW)5vSy-eIeeed5L+%^!175G(SLs{ zy8kkO5z8#l*vyw2N+^TA!vxOiO(0xx5Q4+9a5le9i~Tfuj#aViNQCclSSD%5Y63>k z!jVRTgd;2P$*&}f@9li2bG$BC`8*M5EZV`U3a{W?B`?+;G>K=seg(_R^5n-=fBZc^ zfUePr^8wHVZ0r6Id|I*GYBlI8RB0*5<+IQjG-E;{jx+}!_%j-3|{ z7n_qHN#-%Uy&sLAE~XGs;iJlp70y&|kpG|8`%dm#FUE9{-NI5=H(GYIpX$%fv>34G zxnmD?jaj*j{0`D+T=@9Ty5KxKHs9fxU~C$UZ?d;ab+lD%Ge z6ZeNFqiy;QXihDsKdW_l&W9vaW$l9x7cSB&Gh4VW`4;LIYmnnxYoU2oE8rw)5O;OJ zPgxh>*GD}BtH*H1Y#!%&F@w0+DdLBpVd(u^maW<}os$-wOMaWGv+3djPB?S5z`14; z-kJLc1Gg#QvvC(O^Og#Gb}<89r{=@+Lkn?gp$A+qY{E|OQ^FaZk$CsZFO;1*k?`x$oFD}E{xg)tc!8vGHlZR^S3EWH4$K-2kxb1so zNl^X>_Ea++9MvXq22bx|^PXPR5xj>LEr&4Xgf`p#@3@78+#W9T{7M)lb`pIo)YyYA zejZbC8s&sq&=#47`1d~1TFu~K)>J0)Ac5|wRA4Dr1VrjUCvoBDg!3nl;(E*4>A>Lt z%xjN=-xBX=ZpwP@<@*bC-}$NR)Yn2`@Yj``%qT;_R!1Y|#DVM%o$J0&_A+mbYZy|{)Kgkhv|=>z)mqXzeN 
z#59hrPNcSGslfR}lHz-B;o6W5VfFh3M?;sww24P?ZUn8I;wTT#R#uQRJw+%r%bG2I zXGSaK{VeCIX>qH^^yBu%d*F6Hk%ZR`flp`y{If7*m%Aqrty_LPlasW5>;}Goas1WQ*ISZ>*`1vi*uFNnFh7ozb+Q?#VPj2lt?mTcd&U9clO5PViCa^AE5(W01J z0=>2pUB(vqpai(_~4Cmh!p1qlgY|sS1Uz5VK zOEqZ5)&=;gi{MV*Q~12llRX?5&&&$M(PVxsDzuBSt=daDTb@Tz_Dg}A=9Y@)rtfk0 z6vi`HwP4HyZDjs)NU%EZ5Lz5V6c3MP0-sn5^=nS_o;vT-p8toa*XXeH>tVDcG8~iV z8^dT1NqBLrfIdrGgP&H)(?4PYcym#ly^~moiE6%hB>#bB?vzptoZE~Od9J(Nr3~)- zq?v4l+dG0-L zDIEO0g&7v~!2Fj5%v$6WXp1ePx~+Eja-Icafg8X$bdjKM+5%YYo{bYH0@=?(@#j<^ zGo^Mx{I68paoTS7)L~VkO!N>Sx?L=rAN+aL1N(Cd|-NVk0-Gt53#_*o;P@*)O zgui%!NA~=HC9}1-5sgL_Gdnby+UYc+`u!LS=(>x7%qzlHld+_Gn=;FBcr1u3J|O&S zC4*wcBTz-;0yYP~2GMKDbl;K~@;)(`%08XR)jR;0oScN#E*~)b?=SKsr3}n2#o(s% zeP}wS793`51qGe2RBh%hzpAj4|3UZKnPU^*lA z4IW=`POvLdgDnp=$M5Ay;O`nD47D=jn1d1(yS z)fPi{G`zzHzkbtQpY+*V=PtMzeE{ok?_kT--;nI-Vx08Z7?kpMgE`$^B*&!_LcVMx z3CtK&TTel2Vi*|36jFP|EqF%Fg!_=Q86!WI3X($Q`E&nyc0;LF7?vf&_18_OU!*+f zp6xe?a?WM^IhcZH!z@tqUZ~_mi!Hl}hVAJtB@Vi%x7uTMAwdDsehvRYx`8#mcEL4 z)+CcYTTuvDp)brC7hGXC=!Qm5PvAITDHy&vk51(|{W-GxaZXTZ=(82qm@I$}*9m(r}zd`0EZ9i~cmdW6;|-$GwNAlyAG%N<-kjKz0D z;7RLIo{u^K^{VGXQ_2P~*z*m%r8&XJiC^gTa1l;NwUDf=8i34`(eOp6i_x1p(fsZn z(7MYzTz~x}CcP$t5uSYZY^FDO>J31!*gE!h9mnY}zewf6HVSXJim^{2#rQt`3Q6~_ z1oOOSm^|Q3Soh{gV&uqaTOd#oVA zs;8^-Zftwf^2io1kKM^01drqnn$4@+M$S^P#vC}HzZL4!Vj(zgAIq-I;bN{P(P<|| z@%O7}R`9V{*ekURZX7_!cz1~RX2r42XY)DX<30GGCWY-jZHI#o#xmsZ6`ldsxI*uq z<>KujFk#nJIwo#3H{0+nrsUUSU%Uf}?$sdXYB4Nt{6m4#>&-m3aTb^HR*jxrbwIeU zIsi5|PC~s+#gH3SgF7O}zey3Sa9;z~fD|=oUx5-+QphL&r|`UQG`a-WqU6jH6jN=aSJTZo zXPXSmk%9h}%KGudF3=fs%pKYI;kxqidckBfpF^G(xS9U_`wZL9m=edn99W{Y0*3hC zzZZkiFi;`19QeVYddo=iNng!yEdWFB&9HCn_Hj({VeRTf*`PfV}p?uUT!P>MnEa71Q*>?Lnv|Lca zIK3YtrdaeFk@$%h2wx3&{2Df|)-SF*V6X%j0?B&|wrr zVmCCS`4WA0&^HKCR2we;(uY@a=BTN~_dxG!(sS7d;ONC9q2Y{3vZW^z9{-%cNojmV zZ@*5WqgnuI@50dGiv<;&zC&9lnq!gMLHuSqktv+_z>N3m%<3`kd7SJFxBn6Ruv?dF z3SPvl?GF*By{_=f<*Q)1WE@Uiw}?Hl5Ta_-60jbth3=n^6t8Dnp+u=_* z%4j|q6iDE0LoGb~IuB|dZo-Mbr8&_hNH@o1%ls?prb;sEAso1b=08g*q 
zim!d5p}}}Dt>yN!1kVDH`TC2t&#$v|{w#rhx;X;HOiB7N*B*a-eoPNfJOO{xUf_T* zMmRln1HG>Q5?)Qu#IA{Npf7VCdYvg1uDrVq#>=U2mHnsi{PB4Fwo0ErUxw4yyZzy; z`eRt7K8+L4KF2c?^ND_nH1kv)0~@gY5t++l&G>Quey4-*fi-U+~T?Rd2GYj@5Sd#pa zQ*gcGR5)r`Dl}EPN$7t@+*zsT5G+26%;CAN2l&qLs*}a!zlcB__UynTnwz*;yI+u5 zbt>EpZ)uhj!J$#lWOk(G7#rQNPFVJG4Agli;SPmuaIUz8o*uIw}pX}E6cJ2E514Kp|XM$NzN_}5(@u8s&>?X<0l~073$EGu*BsCaQ zE5QXl(qP5Dyqm6x&stp_2U8|$;^cLY=v{~LOrmuS-j7z~J|yt&poml8oFc_`rbuz) z_s-+4{N*_|ijth9uM~ctxDs0CT3YVv`iaqFhe4-*CdM?!f?&gYvfp(DJ{3>Hn^mvS zDD(-e3=1K>(YbKvni&K%=g@)ar|1~dP4FdL3QD4;u;a!j>4z~}*$pu%mNse+h>pzS z%CrsH=Ml3w86^$;^{yRmFMLXU_PrNelhornY}atpBXh`1lMUEnsKWkx^bT8F^_W8M zC=7=W0m~;`6%4+>__jrw@~NX9Y}bZh{+>@A@5l|hV+d^ zL-&y^;!OZ%#M~#g`M>DzD?d@rbFi{H>jcUEbrN>0kmN3h^Nj19Shd9n_@H$G=T}95{n0Hb6u*q-e*Y0YsZrz&cU|ba^bK}={zZuJ z3ElE68?HM$!m`~#mLsoz5E^9ru+c%uxYSJ+Qg)`|YXwL4gotuAXLHz&qM6)#r9@$2 z#SF4!x<1&ykOJqny%=}T?0*!UhhL808^=qfq?A%gOH)#)6nf5eL`jK6WRF62_>w(Z zqA8_~rnZrUdd_u}6iKBaglrilBD472zyF};)$`ovKG*g6yx-8d=QyYi%40co8r;|U zDU%gF#pgly$?QEESW`5cue=;i77SD2RS_+CVRsj68aNe_QqJHm-U(A(Q;ApeQ?X)_ z2Ie}tGApq>?{HWR#}Bw;w~`D0HHy-kH}~@1tq16Q;hFl{E{yD(eSn!f^?|2}{&;O$ zCz0Hw&cS+XCc=*rmeft*dIgy2J--gnOLN_5?zfWYZtBAiYUxIbZVqneI>9FTi63Fi{1J7y$ zy1F`ujOac<{ucM3ZbBc~r?CQxF06)Hc!t%1m)MkcJJhB_uysTdI=V05Q#1}yzq^7% zapnrzo12eobXu|8T9ZF{?}WWe`&oOfIZWQ=!e_PVV{TJ6ok$(pYUY&HmTw4 z%pYv@D^EVNr4XFGoFFSc80TxA!V??6iR`GZXs6r)@rR(nu+ru&_T1Ly3PJ8H2JCo} zmcUV|5L`oH#i%H88n@*%;R26Ae16$4xGykBe}9aJ`}6CC-sWKb%{3NpUdSNROxA+# z-Rl@;y^)s3&k}w4=ftamEoo7kB_BK^3h!FKW%*H$P;cN%V!k#S&%Kt!)RklLmu>|M zOiRHD@BgA{=P?**s387wtp$uNieXg!CzvO-3F~nTF3C6`%0F)l0=B{L7MYn_5S z+s8mj_$1*O(*iFuhk^r6$4Psw;R&6OcXpp92Np;}$JJkOBWs|L=XwA!zK$TO)`7>t zB4A3hLG5%f9G0mS|-oTUKMuq9s_mWjMc7Ld${|Hh{?C9Fzwr|G;wXXtcYdM@J7oPPQ~hi8jAkiYGoK!h^O@_O$vUob4XYDrzK!zPS|13tJDZ z)%!48*O31ElnM#(cyeRh2-@o?Fo5T)(rJV0i1vjw@RUnZ1@ky|WnQ*u;g|hjuC@%@ z{)ItZ+QePe{I3AwB)fF#AIWqlZ>Sl8gms`@w z!dWtHxH46C|0#avZ%PvG^{}liU7+GT6Ho8^O4?UShz^Xq3@_^o#O0flVWf6G28XzJhSArmStGY1Q+7?`W9OKH{i}E) 
zIZl$l>GdS92fsn{#K*Ao^Dx}h?FptUPLUmb=AdJ-3pV7R$H3O9VtMluWJ*vGOMVc| z4~A)j=gd=ZchyVS?RSkht%$?u`7vzMMxlQmd=d7mULY}>lJK~#6{hYj!Y$o}WZ1!S z78@0gV+`8G2V0$pi|Z`RA25MDWJ|F5n*!h6833PuZNrGoIWQr)oIE`~fESGjM-auB*Tn6U=q@g!EOjcl-LN0U#dg&sr%%l>Evj{CFW#t9id zM0F|8%bf&M_xZr9AX)e_>Li{t^x}WJClLP|D^Y#JN?0I!oEbZ4iYul5gTe<+d}_Tj znIIQOmdzEz877=Xt+EiE`WQ}Xt%uR+>&bG7k>Ig53kJW7#)JNfWO?-&ToC*i6`bZ_ zb*?^+OwS~mmLqz$Yw7>r@@$c`^k-`swm~D!KeE7ij9ZYv%8%m=@z|3 z=<-EOhKk;bDr7ylJbf*0{WKhm{yPTv_kqCfcP4AZ6G5k-TIiYjv$tsps4%z!pUVZq zwS~exe#{4AQ6&wYG>3^_e}Zw#1m~uZ$NuR*h56sUjNOS>N$aX)`2P3_`Eq0q8ZCN= z`}tU?h>+w~15RRt+(i6)WHENk%Yo*cHz@zR2{COWoL7p4@o^5gTv3d74jZCsas&Ju zCdNx@Gw{E1J)ZT}0_Qm%fs224!K`O5@r2wHxRiJYPU%#k%r|pd7^z36pUPx*@h?UF zdH?aoVka)wE$kLICZMmvIe2<@BX6yGjc*T$@Vt%@|7)xP8;)ILT@$|xd8l}ZvlLi` z-byrqE#zviE#Va_*t02yDKEjBl~K(?9Ivt1k3VBG5x=-rY5&mFwU`$@$pd&LgD zE|!9;(^wMiHJFbnDuJi(ol#jSPw-RaW4+`^`pIrBQC=U$rD_a_g2{Zew6Vj9c7q|y zdOg@XyatEtNjU!DAN)h-(sTw(;+-sG7uyMu2w@vHyGEBg@;2A zV}Dm2+z7qFQh!W>QEOkbS2+{NF7-S-;4Jt+gk9W`7t+)t^9T*>?t`o&uI%vkQuq*9 zfc}31z&|nz-@QG`Wu7?GZCh06nyAxoNas5Ko~l7l->+wXGwu0-f1YJ)x2w|4>x@7u zCJ%0}lI0;&WW+AJW8hO-EgC&Eq^~a>fS3cv!QiF@zg+qiw+4>m8dm?mdLFww=fK}Jb6B0W zE?@Ji3c}tEp>jeFNNe10c1~HHD~?Zu#9iy*T}&XB&XB`H3b*XKZ3Y60*7BnT{aEk3 zh0inbLFunUVdn1`)RWH@xa238t?x2?KShtp#E+pc`EoMxTp!r{LMJE3FR zXdHT}60?f$qtjOHTVz1CeHwgel*Jh7 zmu!i48Qx6ICMtj3=Q!$a&z(y_cEsT`a)c0o9O){e^~H%7ZXM0lDmGc z+-RgSnjh(AzP+R1-QJz3T&6DMiJsu$M1#_?7H$2E3H{i4m*W0{m$>Tp6bgnzKx?o) z^akvPD*2I|C`mJ~GDB`RA{F8K1md%6A&Cnai5ffod7Sna2A3`A@$muJtYuB-vaP`F ziZMvijQWnOu)8UiKCNF7%rYcdRCX>S0N)K3qiSeaOUx z=Lb_uS(&l~T^YVF=r~NG-sHH}GG1{`1@2wl!LsWOuiE7>k?-!ZqO@%V-?a4|=!XA?$Ddfi6Sp4Lz2YnkR9nb(&Sb)ao=31a zsEuv@YKvoiyrKGg4!TG_5bYe>3b!?<BIF5lKI4yDCt%sfbeyKA%kQ>)#CiAsf#_r&ScPUooUlWSs0?LC2F>OZte>%k z60PE=^*PXRx&kL`UJO%Q(;-+b7-9vd$AdN-;xjlI_N@~-jNNTylX`~OWPl?5ujwZm ztkMF1A!kzVW=h;sQ{YCy40^w$89QQvvEAS##^DRRo+K^)SFwOEe7KT6{Udmn{5!x` z{~d8OT#HpnVQg518JFDaES?tCL!vu2a&I#=@!ZcID7CND?!c%p$az-GrVEVk^oJ?X 
zT@?uH^|atw*9mxdYzxG0{Xrf!81XO3Mx_6QAwU1}I;3Qq2n?yg^qs(XNs=3e=6pI7 zW=7#p>m#sK;Rmc;FJ`;9PlH_(F5ux^K>zGh5-Sx8bLRSEtUPBuU)&u7t;ooYiGw&C zj%49)5qG&f!sX8EM2F<0*t_aV82fe}F0~8==Svd2_RvJCIzS%+a0w4yRm0XO#lwu_ zTCi2_GEMHKi(mR-=E$=&-Gku`4~;hNuHSg(Q)@$?BKQ6utk=C*OXe^-U5sAUr`o*N1l zf5(ATWDIM_Ol9A_75T*op`%=($Zj~v(fY%a$bnl!`P}$r{7u{%Fb=x{_OVt}fGFbW zU4gXtuL$(jeEER9m(Z_po5|04h!=8-(0g)~DD|NS87cJn6_Spzz6dWI9$g_GKgAv* z{L)c>YzAaa_Jw|TMQZe^9V%}G&{^LFmUTrcOB{Gu1`5nruKYg z*;Ur%pwE1LrD&f2Q`nR@llp&6Ax9Nf?vLnOrzz2$swfU&%r;PeH z4nxSyd7zXc52o6u@t)x3`aRs0L@NqioQU(VJlY3-_{!5hH!GZO_W`y%kfE!TFOV?7 z_f7ol(Mlo+cv}|v^C^ymIfvqv*g`zLS($FvOd`h99+M|`)hJAeh3SFbfZZLMMBn@mc55C7!%aDGZuJCsCS;)du?rJ6 zOveL3m$6Ak52CkRgbELTa%=h_<}Pq4wOtpF)E2_^MP`H_UISD6ePQPF z-O#I<1Sh{Sn2>Z3opzjoj9G!ud8>;&&J=PQM=!AiMJHiiHv&%PNnrDd7o>XOI2wD( z4-Ss<5IAg<^ll2n*!{oQr0^E#lCy`MLQkvkL@jBF3@%qB8`%<8hhtKK)nBUc7MyQ#R$M`k=JQVRrcV+A-e1EHgGGSR821=9E! zg1^s!frmJL%PfRTuV%qYjaatAxgJM#xnNRMIQt>+L6*jgc$~vD+GEY}@739`sl zsumpr-NKI6?XL${-7L)PCM065z!VNwJ}VB1&EOvljeuvpB(u%7@QiFLTscPIs;vEM zXIEZ=v0H`Q=^`n-SQ7}hZO#IlcLS@6-iulm_`=E&V}uM_EDzYEPvwW`foRoX_^l}L zAEd?5bD#<0r;b8}N#n6#$x5`d-DbD)&|XrMuYygU`=L4VH{8?rg5IEPrq&<@>czLA zaq(2OemRgNGf0)ZoH#@sSIO znDC_mCVZ0P?Xhz(=v6d6h}eXQD~^KU(Y5f^g2DR6`GUt~J;dF71OwhDpvAt+*CfU^ zLDJggSRyzLk4)Hs7Kz_L$}k1i&Ax&9f3>hnW(ht{-G{@H?tVTRmauoV0_X7FVd2DL^9r&@aS#-(#7+TMJN0yg3!+w({X0g4BSdagOYbOT^UO9$493BgN zsc-DgaT`9$d%EcSik+AtlM7O#67lm5Nt~WzfXfd2$D8}U;OwL}*q*dXa6~ubkBOhy z $Qg5e~-MQ~AsCt_@YEo?s6Pjt76K+pzbUw$Yy9qB;xgJVj^jc|Zm`p&L*Up-#1 z7=jVE!*TGCF~n)(eD*y-n{9Sb=gq5ck<8K@@+n%#TgN&>vg;+#`+NbnT)Kqs?#uGv zc2D%*7K^`XCy?r=s^A_q2duoeK+>aABxQ0cdT&l+dv>1{U$7DUehx3#n*K>JB|Ji; zt@sNjw)!(Csb&~HcQRh`e#wq~jWhM@zXHZ?o3EPh=@Pc5W$2Khiiv*{-1Q^1R z@?GSO>qNG3%|+CoJP#Mie*wHP3dzs+Y~9r=k@eD0VK-q3jW295LploVPz`rP{)cJb z1JUP=7rf8jK)f~{!|U5e(^(_#5%HEhSYtbcD*Vg9e}~kOpEcz-mmflRrFLwyj3Y}H zQPJF;CfsXtF-yC732!b9$7{pxft&FLl=}V?_Smc>r|0ixcZR+aEnU{e`ukO2Lyt0b z5$@FwTw)<%q#9&vR-mXT0H;ir7j@lBf|r|I@P@F@HJusFFTXhhSLaRzsfobb1s<2r 
ztT0$7J&^xWYa>}wfiQYyIb6DPoV)kRvLzFbU}nQFa=QO3blJ+lwZ;;7-uZxZz7g?N zF}ViW4-($sR_7*TG>7z9W4(wHFMeRIRfj-VIVb_@&&F>>=Q|poe)hv zlmQz@n2A5F3l~lLQv@!Vnap9gz#zGH0OIfegs=;p;)m74z*(8Ih%;I=Aon?bx=Z+b zsF9N+T12dYoqgkpms_~46^;F=V?#MvR>S}qM$ zM|7#{KNY%T{c8NErN|@9BFP6AO|rjSiZ|Rh5bG8G!>CtLS`o8fFI9&3h!-1oD;Ph;Rz*;AOb zxE^;-p9{GHKhgQCBc|<&DEsy|8DlHwLz~7GX4iU8yjHIQE3f!~b)Xh^s`B8M8$0ol zi5E`_u7Z_gjo@dk@H;*F0?zv`gvNw?^77LHQQFFtL?$-}Yopc3%fZU*yz5!9qDU5# z-A=}h=9bVOGGUdZ}VEd}!-1PNMSTzcuNc}sr z4UT5h0tQg$?W4Ho%y5`8^)e3Eo zcK2p~GCfecIUOQWeYk~sw%}h1;ey`4qmbf0|o(641xgsC!rQ>m2zK~Ps%Dce?Vc(H#a*{WiNs_QwTP`0ME|$01 z3>i+wd|JGq4~hO;W-_1~RWxsq>GNdiLyvH5;x;_Pc@%xox|GL$a-wf0hQXR`^;q@6 zm@aDggVIwM@Xl@3q{Uv~>Z}U|N%eQ6!BLWb(nv&uhE{T`C!3YIzZC9Wl_YP+5%}-r zIle%C4OKtj$|pX3DY`C|izTN4SMKd0ax+rF#M_5Etj&jod=S<6uFJhI>S14sAq_R; z%%PaWlL>cVZ0>Mq{F==k`7!omwGYoN! z&Zq}KIL4x<>?jz^+}MibNMQ91Sg0M$7Th_?&*pJ>)N_LADi5N@_3QY@V={apQ>3+a zoJF}H?w%b%Y=5=G{M4y1Y)Kg`UU>}PNZ8}(q>H$#IhY#?Ztdu~TB4)+8r=Tvf8=8I zcf25b4nk)nVza&+#P2Ypo1C68*CFjVZgUaIw>$*7g=ugu%M1TC_T#|w@^pReY}S3< z4Zr{H#E-leN ziYR*DNcy5<9Bw}}8_yO@o+-Gi@?7ZEkX-WG zdL-XDCk;A969p%2I$m1&kEEr=5~+OxFWo?!@V8fBhf4z4yXiO<%}HgKzkkMZTW|LL z)LHgpa*L=d^fqqP&>|(Cwdm(mM7*s$xSoXq{p+Z~L+$F|Nz5fkYjo#9Uajo?{p;wx zZ8qO0-NWuKGNw5hio920FyH%7n5`vTWYWz9vM+j5>9-1SZSENQsW<{kKdJDTVGX3# z{Xe*SNgKltO@KT9?vg!DWvICJ2EM(xgBcxL0J8TFz}kB<)b*JbHcgLKSm+H; zDgVvFHP52*ii1}b40Az0+#QxKk)*8#xe#*F1ZL!EPy;(h(5tTlxxQfV7T!-)HP(>U zlXU{C`YIL)o&V#tkIBx3ariUhFP^Kj!D;&!F|x&MGS#=%z?s5){=d7=_XKd>jO#JR%nnG3A!G${8x z1_l>>*o(7oS*3#Dy%M^cU!`Pl;p+E9`&~6?+Rviewr*r+Zzz$;)Wf1<3*p%^J^Y#> zO;uBkp(1+^coY}mPr2Vry>%P&JMJ~2;<|%#gcqsk0yQD zf|}U-@-ZnsINn?gxvA1vxk8L{_1=h+ZDcUL^$a!yE+F4;m69g?(aU(C_ek zvFnY0tVPLm3;5C;DcX3u z3GGVevwa0=WX1)dHy3k=xU>-TmAnSZS5D*kyd-?O=rUQiCJ3LOe}QuPLgsFREne^~ z#rjQ4fNDn)t;go^M0WLAm?CKh z?OU%w>Yyd?_|JV-;_V%U4|`CF5_rk=(0tMRz4=q|f@^ccKZ{t?|QWY9Rt6pG!;$>)d*OqnTQo9ZQT z;C3aF^iP%4<^KTJ=0MWYq`>zt%Oh5Hay<0S6O>ZxBb&75!Ax#~U-EYdd$xEuId?Tk zM2dm7+-J+@n}Ly<4puK1h!$~y(6mC0@6=e29uFt6$0<+^Q 
z!CRL&dbW@;y$Y9-Dv-EZORj?<*=ruiu&YZSPjte&;Nh z1}c-quhu}XnG2M?sU&`#+UW8q3oS>b<2V(;i+(Q}*8S@vb6N)QADXZ6&h$js(vyS% ze+WdjM5El6dr-A3NW{dk;4;t%p1luZQpyf|>W>9@_bw6K6gRMGu`PTZ7l+ho7~~vN z#8+4S+2@6)m}HHKkO}T*yMmRsF#%IgC_$&cQ3HM<8^#&;fBB3U92e`18Tq zaC_V$_UWw))uRa8>Xe!Nwo){){J=_jV@1ClRk;14Jbr8GXO^u{h#wQZvHNs6S}t0O zPK`%IQ+;fOzS<2KvmgRk@EMr+Q=7)Bf5q(~eOMC^fz1=%!FZJncyRJFD^_%cq%dcw z%ln4Idz;G=Z;ya2Llo%zbSu&Hjm5BMY8W?d_7+@^iV$}6EdP{w26uN~DJ#^H;u_mg zyvRwOcF!Gvm6`Fp=FvU)*d2?B)tT^1d`TRBDpRakcnW^ogD_|mW=c=rvW*wwaiy6d z&q_@a*A2fQbeb>Y7a{wSb*>b<4r!tPGgT^|WC)?&SuiI_66KUKu+#Pf^K%LS9UBc^ zpU@|2)eC@Aw>scc#b7S^U_7dv67EZUj6rA3Sk|*q@bcOs9Lmz6 zHmekZB!<$@<27*Xj^%d#vY!sP65xW4llnOM`vTzY}e^e@@CMc~u&0nhS7S>1Z61F3S}LD~MX=C!vDQZ*qFtLu{Y&9(T7W!`EgPFpr%8 z6~&S~!)^h+u;m9hJvl~dzbj&+-yX5C5oaCeo}lh8VEJp8!2ltu<;Gg@;(t=a;CK)| znXQKjdFRE$hsdIIS0X;?v8J^z0{J|{MVMJb_)*hz(U*WKd?;ksrXG~zy{}_&uMy?? z_h*WOHIj*TZ{6fmN|liH#D|UE6eSA3xCtkFXP|Oi33Jj90|||Rc$GwB zD;*>Ff#dhl<@{aDDcH$RnDoJ>0Cy1Q_oGJc8`OLF3S%C-;|F7be|a_sbfaa2o!kk` zXLHc!-ei=1t4!ZSR|~wG8Y0)y&fKTRgYj}Xq_z>TUScSZyr9UhD18)92$O{mNh#cQ z+%Z@(*N}gmIUg^(_`}LBLtbgt0n10%0uCI)-5bw>T2KP(ne`HlzpoZo?~vlb9-ab& zP?eu(*v1O`%(;&KSTe$)9zPCp#UFQy?OJMl`Bno*XtKXU`d7+Rp_WEhWpa_B>0NeC zxUV_cQfRYNvRwVcKQd!)6bTx03vCNB>F@U4d}L`s+0j119jQD7!wlw= zT{0tSK%@d)Hnp3jtu>-u3%-b|^HvDXPAfd+bCjFRbm6^gC-GbtO)hm!4w4Ne`Hlb` zI$Tb8r}S0Fs6KbmEn`ipWBw0=RknkDrX@6e`viOS=Q8ok4oJ#r#(j3zS&%Th(mp?r zf9Na0j}LOuzE}$loPOc9OCR9Z$PhMM#t9+^n294w+~HPYIy_pWMf;j}aL05Do-lJc z|6wwR8{`avs;#QHN6Vg%8x@T`LA$uGRwL9+noNU!*zxkD&rn+wN-{d6soS4{?Df0D zU@6Rud>Rj+=J@?+xO_5CcCEqKW1jSVl_ZvLEQS5*JIVC1&3GfymaN$Qn8aOQNS>OX z#ZyYp;bG=^Uhn@89fwtm6P^hEmRAG0((MR1WZj4!ox{0mb`+PdzAv6%Yf0n#+EH?U z4l0cn{%_{P>4b5OLXKR>0S&(l<=={Mhm#YxFrUsB#p`kJ+(A^zU5>YsA{HR~krAoe ztfg25LKdjuuKtNMASw|2jH1eJ-M+;tuiYoBZgxSb${s#Fx0O{EYSV^U&cbf-AzQX; z82>qUKRO((A>Gxla7=S5%r6q(_=KQieE46 zc8{`ql1A)bwFj;Xv4!)mrcrr$WxB4OVVAohmVA!Ju3I~JmC^&qUU-Kr=`>|3yVl@v zqoZ(8bPV*bw4;ws0W7F}%l60Q;?d}4eDyO3)~FlvySDdmM&L$l4>99GV+1(N=eH0u zFdW828Nd$T9 
zjiEG7i`Cz7kQkmFpDMYEKk`UORgGKAT#S@SHLFX5JV%wD*!;Ld>!lL;+ z*lw%XcYGy`FCEQi*Cm&I5#}5*VPomcd`-I0(VFgEu7V>KchhPMSsJB(g6)_xo11-# zv^!2-!>?!0P;&Gw2<&u)voAeZVb*>w8E8bZGS>5hQO8*2++N&&K=>PP&xG!Uv)~6Y z^tf%RxZ#k%xV_zp z)yvzk*!L9Nd_Dr!E|x{zHken-V2rjUyue#b?jX=O zoj81GJpfw9>(GC*K7#n6KJNMNF!-6g78zTAg&%Mb)|`8ah2_HYd9OAO%2$RbdI@ml z?;e!OxDDek#&NU2)3~BRjs6%D1nrjIL?>f1t?pvvrn(yu$!quUJTJMn#C@m0Oun z_g#D|S%nQPN5lpmB{1!4G3jdPgYk|rd~o3=wE2_;Z=NdB0}|CR=cf(baXS}WjU&;) zRZJYM{Ky4i*W~G?PPaL0;1J`>#6rCQJ(dU^y^+!AxJL_A-TsN_Xn}LE{sPIE*$pLQ z?CJa6UNA~>0DhbQm+0G)pm1^@ z56wA7f(qT0w$9tBQ_pv z$LHP=!O!DTT=PgYMl60U)@rPSlmk29eTWAxohJCfN;K&5>8rU~tP34)rohE&U1aZ? zIBs{Wk`($6hM%X$(6yG!_ z)G_6w485iNjpTH6VrA`JG;6&Er31<^{^cKhb$%iS3(UJgmgDKmeM7-t@O(;axC$?) zEAW`W4dQ9Kg(8R1Evz}M69@bqj5V|l7x|2#Z-xFrjv-=y&i3pV&AkNmzqBJYQnNyFVoAb3Ax#$`e-{7YUr#2xiw2O>$Rzv-#zou(Dr_ zzx9`cteQNJ@tcpUJ5G=)|6JHP;IYW|eg|A!u^Y8yN=4a+6uFURfH?EWZFa?bD2;2* z#-2weu)Zyv=Q;PYmG5=v4B`H3`b3wSAMu7~#TQ8B#86z6Z;F~aYV?k$Z&|FxM;tPv zoLF4S#E@^xp)yAkybc=h6D<$f_OCm*Mc6j9d?Z8O9d(1~?P4u{H*ozPj?PG+05!myp;@U32ke`>k{rT zGV?IJ4?PbVnFie9ufTCGdx@gWSHa-$eG(~A$Ly+gQBGHd=NcVmNlqtVbI(#};l@I? zY&wgwYJl(6uOQ$@3T%3Cj!8@}hee;h;u48wW>mdZbm?X+vzJ`N8#PCvX1hBKbM1u| z_c1)NwGV0;klS0jVUSq^JNCF5?vYSDJ!>NxSbVgrncE;TzWPp#m;2#fvlMOL;=m_6 z45msiZxG);W$gNIBY*JuHq2^YMN<@ifc3yfQ0r_C$^P9i@3Rz7THeG=ov!gmYZg$Q zy&L%|$2hFBv819fMeaHEH2=NgCR|QY;}->Y%jeEwzJAYG7Im$aj0RgSCuCCflWvO5 z$Ir&Jge36FwWJa6HsQ!DBl`7aA7q}K&ZGyt;UQEwKdx%?{v$Q;=IwJ-?C)S1YH29p z){p&eC8%PUgSU3>q@zq%qtr1mh*V;E-}K$Awx%8@ofyb9hL7d7p&s!1;5ayzO}WR? 
zx6oPl8x#d^V$8>K(3`cM>-`b@!_okKO8yW~rUvnA?P!VKR$MXK7`zU>5EZ`}#HHjP z35+mxULvW6o!jbwY>feA0} z+lBRZ;*Q>4bkxmdKa&U3(+$xu`F^W-?DY9CplLXrb;;SzcW?>j-V=7Mg2QBkz`uId zJCw^R%kV)cWO(0w0=Ko!Ecu)Pu8^FLE$>Zv>iJLNUB1GOMW>9r3|mIY>y`L?T{mPr z^b{x#tN8rHUYji>Y%*sGF}ob1Kpx`oi{;HM$kbuf* zB>ptvux%DwU1GySM<<{M27%br8lFoR;gS4GvC5r%t|a3Iy+0*+%@qsKUuT2c=tF#_ z;zg1o>rwv8c;1*POFhHyiHTH?)e%o*W0km0d)|&+fMM2j^UnG z%dsyhm=|4BeyT#;Fjm*GQq;$ZXVHxenb<)S<735>*OV z%$3})?eb};RWi98_ZoaJHQ^-pmnqqbGW9+)!jeC z#%ZSfVUR1P)P?g8+wNe4kfk1%p~|P)yFo?L1oRrRlFI)mgk3{EvxOxg@J7f+4Kdz? zXJ$LH`2)}4LjNh^9Rc-d+p`2$@3MeVPLa@}YfNRvj_2pb3Hz#=;Z)~v4qVNPC7D7V z>uTk2Dl%A43w-zD`Ti~B>#kG6zC2l+t{P7-KKI}=Ql$9!At?~?=mM*ic#F^M|D!+K zV@Ua=OuV$Ng^W4nKsLpO!(kgoJfXOW{~W5o*L^z3WA7MYNOTp5<%P^$qXeBgw+xTo z=*2hh?BI`RD=?WWxZYZsKKGmrO`U|Ujed?nPB(Dk=+ihW?xHC0$q@)i-^=v}&*3BG z?0NI73cNAKnGV)SqcGVKTG3z>6&+j@ar6$(uAVobM-MyDz1Sn%w*sK_(SI~qZavI;rvtrzwt}Hp zk7jBc@!B2_yq`UbCyi3%DO$p;F*p)G>bgRnkW1_=kfDAp|Dodd1KgtJCKR-!@D1VZ z^y^wHz~XfJT*xLEPMyN<`l#?#UrhK3wNvb1>P-A~tQ5a5eFuZpPtwPBo$$U|@P)|^ zqtEGI82+J)8EYls;<}NvNn4RWk=2FaXOHn|ax#4Aw~f&JVG0{*cMJO5_hG}e=lHp- z8|xgCxL?yccIM9-9I-Bf4%xC4wSLD?{kb3T#4KBGc4!O_mg+#Sc^jDgp;jhSc!(RN z_<^jmJAF1#o{sSg!@B~L{N~;*IRCXF9_^6k+AlI`*QaK5{aZ`UFY@GPR|+}1*dQVy zWc_st{GNB&wzh#1Ub3L^BR^p0t+30MmgGct~ zV`slD_1TE}!pYHa3T7 z=_UE=K20Y{d$chfbl;EX)+o^jb%&XGe7d;!$u%xHZYErHS#4)T-2%ENIRAW)5f5| z)YM*v-q6(N#(!$b&JEtQtfH7_`!Asufq`V@z69=hmx1Y&3NmWLBv>?w@-h1lu+qIM zymV7N-{yCpc>R;*39l?DzG)zlfjva&Z6T4OqxsdlW66(4LY^r8D_GjJGrlaG-yZ1% zzjytC6XrRz^qUUt)H4M4ld5!+mnrWKyb2Y@>eTSkK3JXK2}`!xLi^ZA+_LC5>y+4r z7pAy^iq1QBJ>)+;DBTLZ>*Bc0(dBe(+D5X}eLg%ratS)VU4!C@>cX*=EVgWXflUHa z{PKd;^xr-kdM0c(SDU0l6KrE~RaPkNd?&o?JUt3+)&uCd)3?dyxLl%Rl25j_Ev6Us zDU`LBvU|^~Atg>s^lB2BZ>%J&P?X_z5vSQbf!A|1eI#G55=eCeC1}Cbo6r$3i2B*y z0V|6jRP@!tG&RB7Dmbqi1BO$#|JK2Rg^o~PeiXcKYvG$EBdL3|46}YZ5KcJGgpM*f zIK6)^f3oE`erlgjLtLlg6s;_N`Ckj=%F(#}dlAk$*Tkh~45jn;ZUU)*l~hx%nV(#p z%6qSVqf0%N$*rTyuxL~W)v3`(Yu8$Ft%bm2DM#qr9ZI&QOJZ?UA$@&Ulk-W|e5utF 
zQSX6D7G>hkVQmEXw0hEye`Kg{lq2s{xJb{E!+6>&o6A_pOwlx#5ZDqXFx;<@&;PcM z&VH^cFjORI&*j~`K1c7mgrxEmKg#5p3DirK^0wy>XOudKDq7;47TG@|Z2K(aUi(7D6>u>m);K9=;jUzQ?_t8>y z0@tgHpt`mq$j|e@7~=#~9WFSM7M>&Kmk2TWtpo4U^~nFbTpkyW^VAx5x=dh1eOq8f zAF0ovf!D7@NPe{*48>KC$oh7;`1)5c{pszH3L7J_~^v~?z2p^C$kxF~$zHf$L}_JgvuN@xZN5HmuYd!tpZvh_rm$n1hE{t!ME0I@ zVgKAXUcCjV_cIebC@O@U`6Fq#kKlH^QbKlVBW|BDj&!6a;ljKw&{GOSx4x~|R@4OL zX)*9#<~a7*j_1#B{sPy^Qv5t40*42jXP*yUCOGaiZ2Z{||I!8K{kNICZ@DaJmgtqq zSx%z2UPPhW^^2lOR@->WUK4I1^^_dhpUd-;HbR@35&1MVoR8gip7_okjcr2b>`L}^ z?3p-_zW#U(6<%8NXKL2;M1lt0@7hB?%pQV!3e;HMj`Fgbe~WNfofh^B=)e`F0ylGq z0<%gFf+SNrw7nNevu;uhx0IvDHx+S}wX;Cu-5LJqTReUg1!9m6|;{uDFaV3%c$-L+wa*0^(+97b60VqH6Z zli#fA^H@6UqU!ZE&*KG`NE#Ll$Y$&F2|a#9@G2N?=F?<4d9;le=~(K+{nrnmyQ?L* z#gzxR=lo;zY}-doz3y=Z-xNCKnK`|6cQZY;?z*T*_cpwd{)`KbtMcp1Rp?6zC4ROh znEbJOkE@OD3w(w~@+CS5p6hRc=feZ2Q&u03GDzZoM$CePRkF0^x|}c*+JwiBv@_=) zzTEg&Bzw(%#-5qPiQBJaV)1|9ZOHps30y4&bmL ztT)+q0C@=+k(Y?q1(86!&p|+A0xQIV8G^Q5_CgE0cNf@o+DIh80a|P*WriTYc#D#N zqNMUZ&#8x4u`?P z@n#-kEOY~O&cpDx7^vp>xxq?5H4e=x?IkZ7)kwA}85)rNhz=HXLh>3@v1hHJ>R z_XT2;_W_As--ml<=CJbyZ)*Nv2g|>o{S$Ppjp<@q=4oz+>N+=4T?cmrp-{UJOu8pO2HVg? 
zlDcgbh0Dd{&w*Iz3qOgKB?cmpl;aaF0~|9bMRiKvHMYKrgvk~zxG92*-2FE)>VKu3 zzP`lu49&(&8=pb%|8B&af_#!P$?-Sm`}7R&_P!NEft$n?o!##OT(>_=uxZHI5(+1K zcR|LRX1X!43ye=Qv5n6t+7Mj9=7s?zTHa1H4@=1O(L9`d>L#|_%txO*x{a!<1#q~^ zjht`lIS~It3M0@3I#ZwMkmu`U;%>FmJC&0>XbUAhGi59GL=eF#1!ZmR;QGy zBnpNtVkGfOSy7xqAxULqiZofFQYKe1uW4h#l1lln_&zXQmy>ytNmpVkz&BR7B{qZ% znNEOtx|HiXSB}zGw`hl39b|Yc7zS-x$@s-?C_8A2QyeQ`Dnd>tD1Gtb3d$WDzd zE^CJ3Jq8$gX$0t4RhQ{ISH*DpQVJ3c6v5nDt(c%5!Twty(pnZ oulq0JOgZuq(f^SsN+6`-Z7D0UL%fKhWXezt%f1_`R*41w0xpsdE&u=k literal 0 HcmV?d00001 diff --git a/mlprof/tasks/parameters.py b/mlprof/tasks/parameters.py index 3c5753a..cf55691 100644 --- a/mlprof/tasks/parameters.py +++ b/mlprof/tasks/parameters.py @@ -72,8 +72,8 @@ def __init__(self, *args, **kwargs): self.input_file = os.path.abspath(os.path.expandvars(os.path.expanduser(self.input_type))) if not os.path.exists(self.input_file): raise ValueError( - f"input type '{self.input_type}' is neither 'random' nor 'incremental' nor 'zeros' nor a path to an existing " - f"root file", + f"input type '{self.input_type}' is neither 'random' nor 'incremental' nor 'zeros' nor " + f"a path to an existing root file", ) # cached model content diff --git a/mlprof/tasks/runtime.py b/mlprof/tasks/runtime.py index 99e90be..6f81539 100644 --- a/mlprof/tasks/runtime.py +++ b/mlprof/tasks/runtime.py @@ -75,9 +75,14 @@ def run(self): } # load the template content - template = "$MLP_BASE/cmssw/MLProf/RuntimeMeasurement/test/tf_runtime_template_cfg.py" - content = law.LocalFileTarget(template).load(formatter="text") + if self.model_data["inference_engine"] == "tf": + template = "$MLP_BASE/cmssw/MLProf/RuntimeMeasurement/test/tf_runtime_template_cfg.py" + elif self.model_data["inference_engine"] == "onnx": + template = "$MLP_BASE/cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py" + else: + raise Exception("The only inference_engine supported are 'tf' and 'onnx'") + content = law.LocalFileTarget(template).load(formatter="text") # replace variables 
for key, value in template_vars.items(): content = content.replace(f"__{key}__", str(value)) @@ -299,11 +304,14 @@ class PlotRuntimesMultipleParams( cmssw_versions = law.CSVParameter( cls=luigi.Parameter, default=None, - description="comma-separated list of CMSSW versions; default: ('CMSSW_12_2_4','CMSSW_12_2_2')", + description="comma-separated list of CMSSW versions; default: (self.cmssw_version,)", brace_expand=True, ) - # create params_to_write if model_files or cmssw_versions is None? -> gets difficult with itertools product if only one param is changed + params_to_write = [] + + # create params_to_write for labels if model_files or cmssw_versions is None? -> + # gets difficult with itertools product if only one param is changed def requires(self): self.fill_undefined_param_values() @@ -312,28 +320,43 @@ def requires(self): def output(self): self.fill_undefined_param_values() + self.fill_params_to_write() all_params = self.factorize_params() all_params_list = ["_".join(all_params_item) for all_params_item in all_params] all_params_repr = "_".join(all_params_list) return self.local_target(f"runtime_plot_params_{all_params_repr}_different_batch_sizes_{self.batch_sizes_repr}.pdf") # noqa + def fill_params_to_write(self): + if self.params_to_write == []: + for param in [self.model_files, self.cmssw_versions]: + if len(param) > 1: + if param is self.model_files: + network_names = [] + for model_file in self.model_files: + model_data = law.LocalFileTarget(model_file).load(formatter="json") + network_names += [model_data["network_name"]] + self.params_to_write += [tuple(network_names)] + else: + self.params_to_write += [param] + def factorize_params(self): - # get additional parameters plotting - network_names = [] - for model_file in self.model_files: - model_data = law.LocalFileTarget(model_file).load(formatter="json") - network_names += [model_data["network_name"]] + # # get additional parameters plotting + # network_names = [] + # for model_file in 
self.model_files: + # model_data = law.LocalFileTarget(model_file).load(formatter="json") + # network_names += [model_data["network_name"]] # combine all parameters together - all_params = list(itertools.product(network_names, self.cmssw_versions)) + # all_params = list(itertools.product(network_names, self.cmssw_version)) + all_params = list(itertools.product(*self.params_to_write)) return all_params def fill_undefined_param_values(self): - if self.model_files is None: - self.model_files = tuple(self.model_file) + if self.model_files[0] is None: + self.model_files = (self.model_file,) - if self.cmssw_versions is None: - self.cmssw_versions = tuple(self.cmssw_version) + if self.cmssw_versions[0] is None: + self.cmssw_versions = (self.cmssw_version,) @view_output_plots def run(self): @@ -342,9 +365,9 @@ def run(self): output.parent.touch() self.fill_undefined_param_values() + self.fill_params_to_write() input_paths = [inp.path for inp in self.input()] - print(input_paths) all_params = self.factorize_params() # create the plot From 53c25d55f170760946256a045094373a4cddce71 Mon Sep 17 00:00:00 2001 From: nprouvost <49162277+nprouvost@users.noreply.github.com> Date: Wed, 24 Jan 2024 16:32:30 +0100 Subject: [PATCH 02/12] refactoring multi models --- mlprof/tasks/parameters.py | 160 ++++++++++++++++++++++----- mlprof/tasks/runtime.py | 220 +++++++++++-------------------------- 2 files changed, 194 insertions(+), 186 deletions(-) diff --git a/mlprof/tasks/parameters.py b/mlprof/tasks/parameters.py index cf55691..5ae7ae0 100644 --- a/mlprof/tasks/parameters.py +++ b/mlprof/tasks/parameters.py @@ -4,7 +4,10 @@ Collection of the recurrent luigi parameters for different tasks. 
""" +from __future__ import annotations + import os +from dataclasses import dataclass import luigi import law @@ -12,6 +15,49 @@ from mlprof.tasks.base import BaseTask +@dataclass +class Model: + + model_file: str + name: str | None = None + label: str | None = None + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # cached data + self._data = None + + @property + def data(self): + if self._data is None: + self._data = law.LocalFileTarget(self.model_file).load(formatter="json") + return self._data + + @property + def full_name(self): + if self.name: + return self.name + + # create a hash + model_file = os.path.expandvars(os.path.expanduser(self.model_file)) + name = os.path.splitext(os.path.basename(model_file))[0] + return f"{name}{law.util.create_hash(model_file)}" + + @property + def full_model_label(self): + if self.label: + return self.label + + # get the network_name field in the model data + network_name = self.data.get("network_name") + if network_name: + return network_name + + # fallback to the full model name + return self.full_name + + class CMSSWParameters(BaseTask): """ Parameters related to the CMSSW environment @@ -40,16 +86,6 @@ class RuntimeParameters(BaseTask): General parameters for the model definition and the runtime measurement. 
""" - model_file = luigi.Parameter( - default="$MLP_BASE/examples/simple_dnn/model.json", - description="json file containing information of model to be tested; " - "default: $MLP_BASE/examples/simple_dnn/model.json", - ) - model_name = luigi.Parameter( - default=law.NO_STR, - description="when set, use this name for storing outputs instead of a hashed version of " - "--model-file; default: empty", - ) input_type = luigi.Parameter( default="random", description="either 'random', 'incremental', 'zeros', or a path to a root file; default: random", @@ -76,40 +112,106 @@ def __init__(self, *args, **kwargs): f"a path to an existing root file", ) - # cached model content - self._model_data = None + def store_parts(self): + parts = super().store_parts() - @property - def model_data(self): - if self._model_data is None: - self._model_data = law.LocalFileTarget(self.model_file).load(formatter="json") - return self._model_data + # build a combined string that represents the significant parameters + params = [ + f"input_{law.util.create_hash(self.input_file) if self.input_file else self.input_type}", + f"nevents_{self.n_events}", + f"ncalls_{self.n_calls}", + ] + parts.insert_before("version", "runtime_params", "__".join(params)) - @property - def full_model_name(self): - if self.model_name not in (None, law.NO_STR): - return self.model_name + return parts - # create a hash - model_file = os.path.expandvars(os.path.expanduser(self.model_file)) - model_name = os.path.splitext(os.path.basename(model_file))[0] - return f"{model_name}{law.util.create_hash(model_file)}" + +class ModelParameters(BaseTask): + """ + General parameters for the model definition and the runtime measurement. 
+ """ + + model_file = luigi.Parameter( + default="$MLP_BASE/examples/simple_dnn/model.json", + description="json file containing information of model to be tested; " + "default: $MLP_BASE/examples/simple_dnn/model.json", + ) + model_name = luigi.Parameter( + default=law.NO_STR, + description="when set, use this name for storing outputs instead of a hashed version of " + "--model-file; default: empty", + ) + model_label = luigi.Parameter( + default=law.NO_STR, + description="when set, use this label in plots; when empty, the 'network_name' field in the model json data is " + "used when existing, and full_model_name otherwise; default: empty", + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.model = Model( + model_file=self.model_file, + name=self.name if self.name != law.NO_STR else None, + label=self.label if self.label != law.NO_STR else None, + ) def store_parts(self): parts = super().store_parts() # build a combined string that represents the significant parameters params = [ - f"model_{self.full_model_name}", - f"input_{law.util.create_hash(self.input_file) if self.input_file else self.input_type}", - f"nevents_{self.n_events}", - f"ncalls_{self.n_calls}", + f"model_{self.model.full_name}", ] parts.insert_before("version", "model_params", "__".join(params)) return parts +# class MultiModelParameters(BaseTask): +# """ +# General parameters for the model definition and the runtime measurement. 
+# """ + +# model_files = luigi.Parameter( +# default="$MLP_BASE/examples/simple_dnn/model.json", +# description="json file containing information of model to be tested; " +# "default: $MLP_BASE/examples/simple_dnn/model.json", +# ) +# model_names = luigi.Parameter( +# default=law.NO_STR, +# description="when set, use this name for storing outputs instead of a hashed version of " +# "--model-file; default: empty", +# ) +# model_labels = luigi.Parameter( +# default=law.NO_STR, +# description="when set, use this label in plots; when empty, the 'network_name' field in the model json data is " +# "used when existing, and full_model_name otherwise; default: empty", +# ) + +# def __init__(self, *args, **kwargs): +# super().__init__(*args, **kwargs) + +# # TODO: check that lengths match ... +# pass + +# self.models = [ +# Model() +# for x, y, z in zip(...) +# ] + +# def store_parts(self): +# parts = super().store_parts() + +# # build a combined string that represents the significant parameters +# params = [ +# f"model_{self.model.full_name}", +# ] +# parts.insert_before("version", "model_params", "__".join(params)) + +# return parts + + class BatchSizesParameters(BaseTask): """ Parameter to add several batch sizes to perform the measurement on diff --git a/mlprof/tasks/runtime.py b/mlprof/tasks/runtime.py index 6f81539..e13c79e 100644 --- a/mlprof/tasks/runtime.py +++ b/mlprof/tasks/runtime.py @@ -11,12 +11,14 @@ import law from mlprof.tasks.base import CommandTask, PlotTask, view_output_plots -from mlprof.tasks.parameters import RuntimeParameters, CMSSWParameters, BatchSizesParameters, CustomPlotParameters +from mlprof.tasks.parameters import ( + RuntimeParameters, ModelParameters, CMSSWParameters, BatchSizesParameters, CustomPlotParameters, +) from mlprof.tasks.sandboxes import CMSSWSandboxTask from mlprof.plotting.plotter import plot_batch_size_several_measurements -class CreateRuntimeConfig(RuntimeParameters, CMSSWParameters): +class 
CreateRuntimeConfig(RuntimeParameters, ModelParameters, CMSSWParameters): default_input_files = { "CMSSW_*": ["/afs/cern.ch/user/n/nprouvos/public/testfile.root"], @@ -91,7 +93,7 @@ def run(self): output.dump(content, formatter="text") -class MeasureRuntime(CommandTask, RuntimeParameters, CMSSWSandboxTask): +class MeasureRuntime(CommandTask, RuntimeParameters, ModelParameters, CMSSWSandboxTask): """ Task to provide the time measurements of the inference of a network in cmssw, given the input parameters and a single batch size @@ -119,7 +121,7 @@ def build_command(self): ] -class MergeRuntimes(RuntimeParameters, CMSSWParameters, BatchSizesParameters): +class MergeRuntimes(RuntimeParameters, ModelParameters, CMSSWParameters, BatchSizesParameters): def requires(self): return [ @@ -147,7 +149,14 @@ def run(self): ) -class PlotRuntimes(RuntimeParameters, CMSSWParameters, BatchSizesParameters, PlotTask, CustomPlotParameters): +class PlotRuntimes( + RuntimeParameters, + ModelParameters, + CMSSWParameters, + BatchSizesParameters, + PlotTask, + CustomPlotParameters, +): """ Task to plot the results from the runtime measurements depending on the batch sizes given as parameters, default are 1, 2 and 4. @@ -182,108 +191,8 @@ def run(self): print("plot saved") -class PlotRuntimesMultipleNetworks( - RuntimeParameters, - CMSSWParameters, - BatchSizesParameters, - PlotTask, - CustomPlotParameters, -): - """ - Task to plot the results from the runtime measurements for several networks, depending on the batch sizes given as - parameters, default are 1, 2 and 4. 
- """ - - sandbox = "bash::$MLP_BASE/sandboxes/plotting.sh" - - model_files = law.CSVParameter( - description="comma-separated list of json files containing information of models to be tested", - ) - - def requires(self): - return [ - MergeRuntimes.req(self, model_file=model_file) - for model_file in self.model_files - ] - - def output(self): - network_names = [req.model_data["network_name"] for req in self.requires()] - network_names_repr = "_".join(network_names) - return self.local_target( - f"runtime_plot_networks_{network_names_repr}_different_batch_sizes_{self.batch_sizes_repr}.pdf", - ) - - @view_output_plots - def run(self): - # prepare the output directory - output = self.output() - output.parent.touch() - - # create the plot - network_names = [req.model_data["network_name"] for req in self.requires()] - input_paths = [inp.path for inp in self.input()] - plot_batch_size_several_measurements( - self.batch_sizes, - input_paths, - output.path, - network_names, - self.custom_plot_params, - ) - - -class PlotRuntimesMultipleCMSSW( - RuntimeParameters, - CMSSWParameters, - BatchSizesParameters, - PlotTask, - CustomPlotParameters, -): - """ - Task to plot the results from the runtime measurements for inferences performed in multiple cmssw versions, - depending on the batch sizes given as parameters, default are 1, 2 and 4. 
- """ - - sandbox = "bash::$MLP_BASE/sandboxes/plotting.sh" - - cmssw_versions = law.CSVParameter( - cls=luigi.Parameter, - default=("CMSSW_12_2_4", "CMSSW_12_2_2"), - description="comma-separated list of CMSSW versions; default: CMSSW_12_2_4,CMSSW_12_2_2", - brace_expand=True, - ) - - def requires(self): - return [ - MergeRuntimes.req(self, cmssw_version=cmssw_version) - for cmssw_version in self.cmssw_versions - ] - - def output(self): - cmssw_versions_repr = "_".join(self.cmssw_versions) - return self.local_target( - f"runtime_plot_multiple_cmssw_{cmssw_versions_repr}_different_batch_sizes_{self.batch_sizes_repr}.pdf", - ) - - @view_output_plots - def run(self): - # prepare the output directory - output = self.output() - output.parent.touch() - - # create the plot - input_paths = [inp.path for inp in self.input()] - plot_batch_size_several_measurements( - self.batch_sizes, - input_paths, - output.path, - self.cmssw_versions, - self.custom_plot_params, - ) - - class PlotRuntimesMultipleParams( RuntimeParameters, - CMSSWParameters, BatchSizesParameters, PlotTask, CustomPlotParameters, @@ -298,65 +207,60 @@ class PlotRuntimesMultipleParams( model_files = law.CSVParameter( description="comma-separated list of json files containing information of models to be tested", - default=None, + brace_expand=True, + ) + model_names = law.CSVParameter( + default=(), + description="comma-separated list of names of models defined in --model-files to use in output paths; " + "when set, the number of names must match the number of model files; default: ()", ) - cmssw_versions = law.CSVParameter( - cls=luigi.Parameter, - default=None, - description="comma-separated list of CMSSW versions; default: (self.cmssw_version,)", + default=(CMSSWParameters.cmssw_version._default,), + description=f"comma-separated list of CMSSW versions; default: ({CMSSWParameters.cmssw_version._default},)", + brace_expand=True, + ) + scram_archs = law.CSVParameter( + 
default=(CMSSWParameters.scram_arch._default,), + description=f"comma-separated list of SCRAM architectures; default: ({CMSSWParameters.scram_arch._default},)", brace_expand=True, ) + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # check that, if given, the number of model names matches that of model names + n_models = len(self.model_files) + if len(self.model_names) not in (n_models, 0): + raise ValueError("the number of model names does not match the number of model files") + + # list of sequences over which the product is performed + self.product_names = ["model_file", "cmssw_version", "scram_arch"] + self.product_sequences = [ + list(zip(self.model_files, self.model_names or (n_models * [None]))), + self.cmssw_versions, + self.scram_archs, + ] + params_to_write = [] # create params_to_write for labels if model_files or cmssw_versions is None? -> # gets difficult with itertools product if only one param is changed def requires(self): - self.fill_undefined_param_values() - all_params = list(itertools.product(self.model_files, self.cmssw_versions)) - return [MergeRuntimes.req(self, model_file=params[0], cmssw_version=params[1]) for params in all_params] + return [ + MergeRuntimes.req(self, **dict(zip(self.product_names, values))) + for values in itertools.product(*self.product_sequences) + ] def output(self): - self.fill_undefined_param_values() - self.fill_params_to_write() - all_params = self.factorize_params() - all_params_list = ["_".join(all_params_item) for all_params_item in all_params] - all_params_repr = "_".join(all_params_list) - return self.local_target(f"runtime_plot_params_{all_params_repr}_different_batch_sizes_{self.batch_sizes_repr}.pdf") # noqa - - def fill_params_to_write(self): - if self.params_to_write == []: - for param in [self.model_files, self.cmssw_versions]: - if len(param) > 1: - if param is self.model_files: - network_names = [] - for model_file in self.model_files: - model_data = 
law.LocalFileTarget(model_file).load(formatter="json") - network_names += [model_data["network_name"]] - self.params_to_write += [tuple(network_names)] - else: - self.params_to_write += [param] - - def factorize_params(self): - # # get additional parameters plotting - # network_names = [] - # for model_file in self.model_files: - # model_data = law.LocalFileTarget(model_file).load(formatter="json") - # network_names += [model_data["network_name"]] - - # combine all parameters together - # all_params = list(itertools.product(network_names, self.cmssw_version)) - all_params = list(itertools.product(*self.params_to_write)) - return all_params - - def fill_undefined_param_values(self): - if self.model_files[0] is None: - self.model_files = (self.model_file,) - - if self.cmssw_versions[0] is None: - self.cmssw_versions = (self.cmssw_version,) + # TODO: encode all important params in a human-readable way + # TODO: also check which parameters should go into store parts + return self.local_target("test.pdf") + # self.fill_params_to_write() + # all_params = self.factorize_params() + # all_params_list = ["_".join(all_params_item) for all_params_item in all_params] + # all_params_repr = "_".join(all_params_list) + # return self.local_target(f"runtime_plot_params_{all_params_repr}_different_batch_sizes_{self.batch_sizes_repr}.pdf") # noqa @view_output_plots def run(self): @@ -364,12 +268,14 @@ def run(self): output = self.output() output.parent.touch() - self.fill_undefined_param_values() - self.fill_params_to_write() - input_paths = [inp.path for inp in self.input()] - all_params = self.factorize_params() # create the plot - plot_batch_size_several_measurements(self.batch_sizes, input_paths, - output.path, all_params, self.custom_plot_params) + # TODO: maybe adjust labels + plot_batch_size_several_measurements( + self.batch_sizes, + input_paths, + output.path, + list(itertools.product(self.product_sequences)), + self.custom_plot_params, + ) From 
4a85e12d155e3cd968db1c5ffefe6c1f20b355aa Mon Sep 17 00:00:00 2001 From: Nathan Prouvost Date: Thu, 25 Jan 2024 13:03:55 +0100 Subject: [PATCH 03/12] linting --- .../plugins/ONNXPluginRuntime.cpp | 123 +++++++++--------- mlprof/tasks/parameters.py | 4 +- 2 files changed, 64 insertions(+), 63 deletions(-) diff --git a/cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp b/cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp index e1b973a..cc4f804 100644 --- a/cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp +++ b/cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp @@ -1,38 +1,39 @@ /* - * Example plugin to demonstrate the direct multi-threaded inference with ONNX Runtime. + * Example plugin to demonstrate the direct multi-threaded inference with ONNX + * Runtime. */ #include #include +#include #include #include #include #include -#include #include "FWCore/Framework/interface/Event.h" #include "FWCore/Framework/interface/Frameworkfwd.h" #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/Framework/interface/stream/EDAnalyzer.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" - -#include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h" - #include "MLProf/Utils/interface/utils.h" +#include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h" using namespace cms::Ort; -class ONNXRuntimePlugin : public edm::stream::EDAnalyzer> { -public: +class ONNXRuntimePlugin + : public edm::stream::EDAnalyzer> { + public: explicit ONNXRuntimePlugin(const edm::ParameterSet &, const ONNXRuntime *); - static void fillDescriptions(edm::ConfigurationDescriptions&); + static void fillDescriptions(edm::ConfigurationDescriptions &); - static std::unique_ptr initializeGlobalCache(const edm::ParameterSet &); + static std::unique_ptr initializeGlobalCache( + const edm::ParameterSet &); static void globalEndJob(const ONNXRuntime *); -private: + private: void beginJob(); - void analyze(const edm::Event&, const edm::EventSetup&); + void 
analyze(const edm::Event &, const edm::EventSetup &); void endJob(); inline float drawNormal() { return normalPdf_(rndGen_); } @@ -56,12 +57,13 @@ class ONNXRuntimePlugin : public edm::stream::EDAnalyzer normalPdf_; std::vector> input_shapes_; - FloatArrays data_; // each stream hosts its own data + FloatArrays inputArrays_; // each stream hosts its own data }; - -void ONNXRuntimePlugin::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { - // defining this function will lead to a *_cfi file being generated when compiling +void ONNXRuntimePlugin::fillDescriptions( + edm::ConfigurationDescriptions &descriptions) { + // defining this function will lead to a *_cfi file being generated when + // compiling edm::ParameterSetDescription desc; // the path to the file containing the graph desc.add("graphPath"); @@ -76,22 +78,23 @@ void ONNXRuntimePlugin::fillDescriptions(edm::ConfigurationDescriptions& descrip // the rank (number of dimensions) of each input tensor desc.add>("inputRanks"); // flat list of sizes of each dimension of each input tensor - // (for a graph with a 1D and a 2D input tensor, this would be a vector of three values) + // (for a graph with a 1D and a 2D input tensor, this would be a vector of + // three values) desc.add>("flatInputSizes"); // batch sizes to test desc.add("batchSize"); // the number of calls to the graph to measure the runtime desc.add("nCalls"); - // desc.add("model_path", edm::FileInPath("MLProf/ONNXRuntimeModule/data/model.onnx")); - // desc.add>("input_names", std::vector({"my_input"})); descriptions.addWithDefaultLabel(desc); } - -ONNXRuntimePlugin::ONNXRuntimePlugin(const edm::ParameterSet &iConfig, const ONNXRuntime *cache) - : inputTensorNames_(iConfig.getParameter>("inputTensorNames")), - outputTensorNames_(iConfig.getParameter>("outputTensorNames")), +ONNXRuntimePlugin::ONNXRuntimePlugin(const edm::ParameterSet &iConfig, + const ONNXRuntime *cache) + : inputTensorNames_( + 
iConfig.getParameter>("inputTensorNames")), + outputTensorNames_( + iConfig.getParameter>("outputTensorNames")), outputFile_(iConfig.getParameter("outputFile")), inputTypeStr_(iConfig.getParameter("inputType")), inputRanks_(iConfig.getParameter>("inputRanks")), @@ -101,32 +104,37 @@ ONNXRuntimePlugin::ONNXRuntimePlugin(const edm::ParameterSet &iConfig, const ONN nInputs_(inputTensorNames_.size()), nPreCalls_(10), rndGen_(rnd_()), - normalPdf_(0.0, 1.0) - { + normalPdf_(0.0, 1.0) { // the number of input ranks must match the number of input tensors if ((int)inputRanks_.size() != nInputs_) { - throw cms::Exception("InvalidInputRanks") << "number of input ranks must match number of input tensors"; + throw cms::Exception("InvalidInputRanks") + << "number of input ranks must match number of input tensors"; } // the input must be at least 1 dimensional for (auto rank : inputRanks_) { if (rank < 1) { - throw cms::Exception("InvalidRank") << "only ranks above 0 are supported, got " << rank; + throw cms::Exception("InvalidRank") + << "only ranks above 0 are supported, got " << rank; } } // the sum of ranks must match the number of flat input sizes - if (std::accumulate(inputRanks_.begin(), inputRanks_.end(), 0) != (int)flatInputSizes_.size()) { + if (std::accumulate(inputRanks_.begin(), inputRanks_.end(), 0) != + (int)flatInputSizes_.size()) { throw cms::Exception("InvalidFlatInputSizes") - << "sum of input ranks must match number of flat input sizes, got " << flatInputSizes_.size(); + << "sum of input ranks must match number of flat input sizes, got " + << flatInputSizes_.size(); } // batch size must be positive if (batchSize_ < 1) { - throw cms::Exception("InvalidBatchSize") << "batch sizes must be positive, got " << batchSize_; + throw cms::Exception("InvalidBatchSize") + << "batch sizes must be positive, got " << batchSize_; } // input sizes must be positive for (auto size : flatInputSizes_) { if (size < 1) { - throw cms::Exception("InvalidInputSize") << "input sizes 
must be positive, got " << size; + throw cms::Exception("InvalidInputSize") + << "input sizes must be positive, got " << size; } } // check the input type @@ -138,39 +146,47 @@ ONNXRuntimePlugin::ONNXRuntimePlugin(const edm::ParameterSet &iConfig, const ONN inputType_ = mlprof::InputType::Zeros; } else { throw cms::Exception("InvalidInputType") - << "input type must be either 'incremental', 'zeros' or 'random', got " << inputTypeStr_; + << "input type must be either 'incremental', 'zeros' or 'random', got " + << inputTypeStr_; } // initialize the input_shapes array with inputRanks_ and flatInputSizes_ int i = 0; for (auto rank : inputRanks_) { - std::vector input_shape(flatInputSizes_.begin() + i, flatInputSizes_.begin() + i + rank); + std::vector input_shape(flatInputSizes_.begin() + i, + flatInputSizes_.begin() + i + rank); input_shape.insert(input_shape.begin(), batchSize_); input_shapes_.push_back(input_shape); i += rank; } // initialize the input data arrays - // note there is only one element in the FloatArrays type (i.e. vector>) variable + // note there is only one element in the FloatArrays type (i.e. 
+ // vector>) variable for (int i = 0; i < nInputs_; i++) { - data_.emplace_back(flatInputSizes_[i] * batchSize_, 0); + inputArrays_.emplace_back(batchSize_ * flatInputSizes_[i], 0); } } - -std::unique_ptr ONNXRuntimePlugin::initializeGlobalCache(const edm::ParameterSet &iConfig) { - return std::make_unique(edm::FileInPath(iConfig.getParameter("graphPath")).fullPath()); +std::unique_ptr ONNXRuntimePlugin::initializeGlobalCache( + const edm::ParameterSet &iConfig) { + return std::make_unique( + edm::FileInPath(iConfig.getParameter("graphPath")) + .fullPath()); } void ONNXRuntimePlugin::globalEndJob(const ONNXRuntime *cache) {} -void ONNXRuntimePlugin::analyze(const edm::Event &iEvent, const edm::EventSetup &iSetup) { +void ONNXRuntimePlugin::analyze(const edm::Event &iEvent, + const edm::EventSetup &iSetup) { for (int i = 0; i < nInputs_; i++) { - std::vector &group_data = data_[i]; + std::vector &group_data = inputArrays_[i]; // fill the input for (int i = 0; i < (int)group_data.size(); i++) { - group_data[i] = inputType_ == mlprof::InputType::Incremental ? float(i) : - inputType_ == mlprof::InputType::Zeros ? float(0) : - drawNormal(); + group_data[i] = + inputType_ == mlprof::InputType::Incremental + ? float(i) + : (inputType_ == mlprof::InputType::Zeros ? 
float(0) + : drawNormal()); } } @@ -179,38 +195,23 @@ void ONNXRuntimePlugin::analyze(const edm::Event &iEvent, const edm::EventSetup // pre calls to "warm up" for (int r = 0; r < nPreCalls_; r++) { - outputs = globalCache()->run(inputTensorNames_, data_, input_shapes_, outputTensorNames_, batchSize_); - // std::cout << "nprerun" << r << std::endl; + outputs = globalCache()->run(inputTensorNames_, inputArrays_, input_shapes_, + outputTensorNames_, batchSize_); } // actual calls to measure runtimes std::vector runtimes; for (int r = 0; r < nCalls_; r++) { auto start = std::chrono::high_resolution_clock::now(); - outputs = globalCache()->run(inputTensorNames_, data_, input_shapes_, outputTensorNames_, batchSize_); + outputs = globalCache()->run(inputTensorNames_, inputArrays_, input_shapes_, + outputTensorNames_, batchSize_); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration runtime_in_seconds = (end - start); - // std::cout << "nrun" << r << std::endl; - // std::cout << "runtime in seconds" << runtime_in_seconds.count() << std::endl; runtimes.push_back(runtime_in_seconds.count() * 1000); } - // // print the input and output data - // std::cout << "input data -> "; - // for ( const auto &input_tensor : data_ ){ - // for ( const auto &value : input_tensor ) std::cout << value << ' '; - // std::cout << std::endl; - // } - // std::cout << std::endl << "output data -> "; - // for (auto &output_tensor: outputs) { - // for ( const auto &value : output_tensor ) std::cout << value << ' '; - // std::cout << std::endl; - // } - // std::cout << std::endl; - // save them mlprof::writeRuntimes(outputFile_, batchSize_, runtimes); } - DEFINE_FWK_MODULE(ONNXRuntimePlugin); diff --git a/mlprof/tasks/parameters.py b/mlprof/tasks/parameters.py index 5ae7ae0..88fb66a 100644 --- a/mlprof/tasks/parameters.py +++ b/mlprof/tasks/parameters.py @@ -185,8 +185,8 @@ def store_parts(self): # ) # model_labels = luigi.Parameter( # default=law.NO_STR, -# description="when 
set, use this label in plots; when empty, the 'network_name' field in the model json data is " -# "used when existing, and full_model_name otherwise; default: empty", +# description="when set, use this label in plots; when empty, the 'network_name' field in the " +# "model json data is used when existing, and full_model_name otherwise; default: empty", # ) # def __init__(self, *args, **kwargs): From 0620f4f1a8a84503d0ce3001019434ee45a9062e Mon Sep 17 00:00:00 2001 From: Nathan Prouvost Date: Wed, 31 Jan 2024 13:04:45 +0100 Subject: [PATCH 04/12] refactoring python 3.7 and more, to be adapted to python 3.6 --- mlprof/tasks/parameters.py | 113 ++++++++++++++++++++++--------------- mlprof/tasks/runtime.py | 20 ++----- 2 files changed, 72 insertions(+), 61 deletions(-) diff --git a/mlprof/tasks/parameters.py b/mlprof/tasks/parameters.py index 88fb66a..7f6fa74 100644 --- a/mlprof/tasks/parameters.py +++ b/mlprof/tasks/parameters.py @@ -4,7 +4,7 @@ Collection of the recurrent luigi parameters for different tasks. 
""" -from __future__ import annotations +# from __future__ import annotations import os from dataclasses import dataclass @@ -144,7 +144,7 @@ class ModelParameters(BaseTask): model_label = luigi.Parameter( default=law.NO_STR, description="when set, use this label in plots; when empty, the 'network_name' field in the model json data is " - "used when existing, and full_model_name otherwise; default: empty", + "used when existing, and full_name otherwise; default: empty", ) def __init__(self, *args, **kwargs): @@ -152,8 +152,8 @@ def __init__(self, *args, **kwargs): self.model = Model( model_file=self.model_file, - name=self.name if self.name != law.NO_STR else None, - label=self.label if self.label != law.NO_STR else None, + name=self.model_name if self.model_name != law.NO_STR else None, + label=self.model_label if self.model_label != law.NO_STR else None, ) def store_parts(self): @@ -168,48 +168,69 @@ def store_parts(self): return parts -# class MultiModelParameters(BaseTask): -# """ -# General parameters for the model definition and the runtime measurement. -# """ - -# model_files = luigi.Parameter( -# default="$MLP_BASE/examples/simple_dnn/model.json", -# description="json file containing information of model to be tested; " -# "default: $MLP_BASE/examples/simple_dnn/model.json", -# ) -# model_names = luigi.Parameter( -# default=law.NO_STR, -# description="when set, use this name for storing outputs instead of a hashed version of " -# "--model-file; default: empty", -# ) -# model_labels = luigi.Parameter( -# default=law.NO_STR, -# description="when set, use this label in plots; when empty, the 'network_name' field in the " -# "model json data is used when existing, and full_model_name otherwise; default: empty", -# ) - -# def __init__(self, *args, **kwargs): -# super().__init__(*args, **kwargs) - -# # TODO: check that lengths match ... -# pass - -# self.models = [ -# Model() -# for x, y, z in zip(...) 
-# ] - -# def store_parts(self): -# parts = super().store_parts() - -# # build a combined string that represents the significant parameters -# params = [ -# f"model_{self.model.full_name}", -# ] -# parts.insert_before("version", "model_params", "__".join(params)) - -# return parts +class MultiModelParameters(BaseTask): + """ + General parameters for the model definition and the runtime measurement. + """ + + model_files = law.CSVParameter( + description="comma-separated list of json files containing information of models to be tested", + brace_expand=True, + ) + model_names = law.CSVParameter( + default=law.NO_STR, + description="comma-separated list of names of models defined in --model-files to use in output paths " + "instead of a hashed version of model_files; when set, the number of names must match the number of " + "model files; default: ()", + ) + model_labels = law.CSVParameter( + default=law.NO_STR, + description="when set, use this label in plots; when empty, the 'network_name' field in the " + "model json data is used when existing, and full_model_name otherwise; default: empty", + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # check that lengths match if initialized + from IPython import embed; embed() + if self.model_names[0] == law.NO_STR: + if self.model_labels[0] != law.NO_STR: + if len(self.model_files) != len(self.model_labels): + raise ValueError("the length of model_files and model_labels muss be the same") + elif self.model_labels[0] == law.NO_STR: + if len(self.model_files) != len(self.model_names): + raise ValueError("the length of model_files and model_names muss be the same") + else: + if len({len(self.model_files), len(self.model_names), len(self.model_labels)}) != 1: + raise ValueError("the length of model_names, model_files and model_labels muss be the same") + + # if not initialized, change size objects for them to match + if len(self.model_names) != len(self.model_files): + self.model_names = 
(law.NO_STR,) * len(self.model_files) + if len(self.model_labels) != len(self.model_files): + self.model_labels = (law.NO_STR,) * len(self.model_files) + + # define Model objects + self.models = [ + Model( + model_file=x, + name=y if y != law.NO_STR else None, + label=z if z != law.NO_STR else None, + ) + for x, y, z in zip(self.model_files, self.model_names, self.model_labels) + ] + + def store_parts(self): + parts = super().store_parts() + + # build a combined string that represents the significant parameters + params = [ + f"model_{model.full_name}" for model in self.models + ] + parts.insert_before("version", "model_params", "__".join(params)) + + return parts class BatchSizesParameters(BaseTask): diff --git a/mlprof/tasks/runtime.py b/mlprof/tasks/runtime.py index e13c79e..a8f49d0 100644 --- a/mlprof/tasks/runtime.py +++ b/mlprof/tasks/runtime.py @@ -12,7 +12,8 @@ from mlprof.tasks.base import CommandTask, PlotTask, view_output_plots from mlprof.tasks.parameters import ( - RuntimeParameters, ModelParameters, CMSSWParameters, BatchSizesParameters, CustomPlotParameters, + RuntimeParameters, ModelParameters, MultiModelParameters, CMSSWParameters, BatchSizesParameters, + CustomPlotParameters, ) from mlprof.tasks.sandboxes import CMSSWSandboxTask from mlprof.plotting.plotter import plot_batch_size_several_measurements @@ -176,16 +177,12 @@ def run(self): output = self.output() output.parent.touch() - # get name network for legend - model_data = self.model_data - network_name = model_data["network_name"] - # create the plot plot_batch_size_several_measurements( self.batch_sizes, [self.input().path], output.path, - [network_name], + [self.model.full_model_label], self.custom_plot_params, ) print("plot saved") @@ -193,6 +190,7 @@ def run(self): class PlotRuntimesMultipleParams( RuntimeParameters, + MultiModelParameters, BatchSizesParameters, PlotTask, CustomPlotParameters, @@ -205,15 +203,6 @@ class PlotRuntimesMultipleParams( sandbox = 
"bash::$MLP_BASE/sandboxes/plotting.sh" - model_files = law.CSVParameter( - description="comma-separated list of json files containing information of models to be tested", - brace_expand=True, - ) - model_names = law.CSVParameter( - default=(), - description="comma-separated list of names of models defined in --model-files to use in output paths; " - "when set, the number of names must match the number of model files; default: ()", - ) cmssw_versions = law.CSVParameter( default=(CMSSWParameters.cmssw_version._default,), description=f"comma-separated list of CMSSW versions; default: ({CMSSWParameters.cmssw_version._default},)", @@ -272,6 +261,7 @@ def run(self): # create the plot # TODO: maybe adjust labels + from IPython import embed; embed() plot_batch_size_several_measurements( self.batch_sizes, input_paths, From efe74f60238b94db255d4fe25751ddcb8ecf9994 Mon Sep 17 00:00:00 2001 From: Nathan Prouvost Date: Thu, 1 Feb 2024 12:59:33 +0100 Subject: [PATCH 05/12] working version without docu --- mlprof/tasks/parameters.py | 16 ++----- mlprof/tasks/runtime.py | 95 +++++++++++++++++++++++++++++--------- 2 files changed, 78 insertions(+), 33 deletions(-) diff --git a/mlprof/tasks/parameters.py b/mlprof/tasks/parameters.py index 7f6fa74..f825e31 100644 --- a/mlprof/tasks/parameters.py +++ b/mlprof/tasks/parameters.py @@ -4,10 +4,7 @@ Collection of the recurrent luigi parameters for different tasks. 
""" -# from __future__ import annotations - import os -from dataclasses import dataclass import luigi import law @@ -15,15 +12,13 @@ from mlprof.tasks.base import BaseTask -@dataclass -class Model: +class Model(): - model_file: str - name: str | None = None - label: str | None = None + def __init__(self, model_file: str, name, label, **kwargs): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) + self.model_file = model_file + self.name = name + self.label = label # cached data self._data = None @@ -193,7 +188,6 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # check that lengths match if initialized - from IPython import embed; embed() if self.model_names[0] == law.NO_STR: if self.model_labels[0] != law.NO_STR: if len(self.model_files) != len(self.model_labels): diff --git a/mlprof/tasks/runtime.py b/mlprof/tasks/runtime.py index a8f49d0..aeb4e59 100644 --- a/mlprof/tasks/runtime.py +++ b/mlprof/tasks/runtime.py @@ -41,7 +41,7 @@ def run(self): output.parent.touch() # get model data - model_data = self.model_data + model_data = self.model.data # resolve the graph path relative to the model file graph_path = os.path.expandvars(os.path.expanduser(model_data["file"])) @@ -78,9 +78,9 @@ def run(self): } # load the template content - if self.model_data["inference_engine"] == "tf": + if model_data["inference_engine"] == "tf": template = "$MLP_BASE/cmssw/MLProf/RuntimeMeasurement/test/tf_runtime_template_cfg.py" - elif self.model_data["inference_engine"] == "onnx": + elif model_data["inference_engine"] == "onnx": template = "$MLP_BASE/cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py" else: raise Exception("The only inference_engine supported are 'tf' and 'onnx'") @@ -222,34 +222,86 @@ def __init__(self, *args, **kwargs): if len(self.model_names) not in (n_models, 0): raise ValueError("the number of model names does not match the number of model files") - # list of sequences over which the product is 
performed - self.product_names = ["model_file", "cmssw_version", "scram_arch"] - self.product_sequences = [ + # list of sequences over which the product is performed for the requirements + self.product_names_req = ["model_file", "model_name", "cmssw_version", "scram_arch"] + self.product_sequences_req = [ list(zip(self.model_files, self.model_names or (n_models * [None]))), self.cmssw_versions, self.scram_archs, ] - params_to_write = [] + # list of sequences over which the product is performed for the output file name + self.product_names_out = ["model_name", "cmssw_version", "scram_arch"] + self.product_sequences_out = [ + tuple([model.full_name for model in self.models]), + self.cmssw_versions, + self.scram_archs, + ] + + # list of sequences over which the product is performed for the labels in plot + self.product_names_labels = ["model_label", "cmssw_version", "scram_arch"] + self.product_sequences_labels = [ + tuple([model.full_model_label for model in self.models]), + self.cmssw_versions, + self.scram_archs, + ] + + # define output product + self.output_product = list(itertools.product(*self.product_sequences_out)) + self.output_product_dict = [dict(zip(self.product_names_out, values)) for values in self.output_product] + + # retrieve the names of the params to be put in output + self.params_to_write_outputs = [] + for iparam, param in enumerate(self.product_names_out): + if len(self.product_sequences_out[iparam]) > 1: + self.params_to_write_outputs += [param] + + # create output representation to be used in output file name + self.output_product_params_to_write = [ + combination_dict[key_to_write] + for combination_dict in self.output_product_dict + for key_to_write in self.params_to_write_outputs + ] - # create params_to_write for labels if model_files or cmssw_versions is None? 
-> - # gets difficult with itertools product if only one param is changed + self.out_params_repr = "_".join(self.output_product_params_to_write) + + # define label product + self.labels_products = list(itertools.product(*self.product_sequences_labels)) + self.labels_products_dict = [dict(zip(self.product_names_labels, values)) for values in self.labels_products] + + # retrieve the names of the params to be put in labels + self.params_to_write_labels = [] + for iparam, param in enumerate(self.product_names_labels): + if len(self.product_sequences_labels[iparam]) > 1: + self.params_to_write_labels += [param] + + # create list of labels to plot + self.params_product_params_to_write = [ + combination_dict[key_to_write] + for combination_dict in self.labels_products_dict + for key_to_write in self.params_to_write_labels + ] + + def flatten_tuple(self, value): + for x in value: + if isinstance(x, tuple): + yield from self.flatten_tuple(x) + else: + yield x def requires(self): + flattened_product = [ + tuple(self.flatten_tuple(tuple_of_args)) for tuple_of_args in itertools.product(*self.product_sequences_req) + ] return [ - MergeRuntimes.req(self, **dict(zip(self.product_names, values))) - for values in itertools.product(*self.product_sequences) + MergeRuntimes.req(self, **dict(zip(self.product_names_req, values))) + for values in flattened_product ] def output(self): - # TODO: encode all important params in a human-readable way - # TODO: also check which parameters should go into store parts - return self.local_target("test.pdf") - # self.fill_params_to_write() - # all_params = self.factorize_params() - # all_params_list = ["_".join(all_params_item) for all_params_item in all_params] - # all_params_repr = "_".join(all_params_list) - # return self.local_target(f"runtime_plot_params_{all_params_repr}_different_batch_sizes_{self.batch_sizes_repr}.pdf") # noqa + return self.local_target( + 
f"runtime_plot_{self.out_params_repr}_different_batch_sizes_{self.batch_sizes_repr}.pdf" + ) @view_output_plots def run(self): @@ -260,12 +312,11 @@ def run(self): input_paths = [inp.path for inp in self.input()] # create the plot - # TODO: maybe adjust labels - from IPython import embed; embed() + plot_batch_size_several_measurements( self.batch_sizes, input_paths, output.path, - list(itertools.product(self.product_sequences)), + self.params_product_params_to_write, self.custom_plot_params, ) From 3e40af1e2886f9155b95b7be1ed16fb86721f845 Mon Sep 17 00:00:00 2001 From: Nathan Prouvost Date: Thu, 1 Feb 2024 13:01:53 +0100 Subject: [PATCH 06/12] linting --- mlprof/tasks/runtime.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mlprof/tasks/runtime.py b/mlprof/tasks/runtime.py index aeb4e59..f1befa0 100644 --- a/mlprof/tasks/runtime.py +++ b/mlprof/tasks/runtime.py @@ -300,7 +300,7 @@ def requires(self): def output(self): return self.local_target( - f"runtime_plot_{self.out_params_repr}_different_batch_sizes_{self.batch_sizes_repr}.pdf" + f"runtime_plot_{self.out_params_repr}_different_batch_sizes_{self.batch_sizes_repr}.pdf", ) @view_output_plots From 6fe8d2536f8cc420f3dbebbdae61d33f7a5df8b8 Mon Sep 17 00:00:00 2001 From: Nathan Prouvost Date: Tue, 6 Feb 2024 16:37:25 +0100 Subject: [PATCH 07/12] update docu --- README.md | 315 ++++++++++++++++------------------------ mlprof/tasks/runtime.py | 3 +- 2 files changed, 130 insertions(+), 188 deletions(-) diff --git a/README.md b/README.md index 08617aa..d53e744 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![Lint and test](https://github.com/uhh-cms/MLProf/actions/workflows/lint.yml/badge.svg)](https://github.com/uhh-cms/MLProf/actions/workflows/lint.yml) -Tools for automated ML model performance tests in CMSSW +Tools for automated ML model performance tests in CMSSW (CMSSW version 13 and above). 
## Quickstart @@ -46,7 +46,7 @@ flowchart TD ``` A task is run with the command `law run` followed by the name of the task. -A version, given by the argument `--version```, followed by the name of the version, is required. +A version, given by the argument `--version`, followed by the name of the version, is required. Each `version` has its own set of outputs for the different existing tasks. In law, the intermediate results (=the outputs to the different tasks) are saved locally in the corresponding directory (default path in MLPROF is ```data/name_of_the_task/CMSSW_architecture/model_identifier_with_run_parameters/name_of_the_version/```). @@ -56,7 +56,7 @@ Tasks in law are organized as a graph with dependencies. Therefore a "depth" for the different required tasks exists, depending on which task required which other task. In order to see the different required tasks for a single task, you might use the argument `--print-status -1`, which will show all required tasks and the existence or absence of their output for the given input parameters up to depth "-1", hence the deepest one. The called task with `law run` will have depth 0. -You might check the output path of a task with the argument `--print-output```, followed by the depth of the task. +You might check the output path of a task with the argument `--print-output`, followed by the depth of the task. If you want a finished task to be run anew without changing the version (e.g. do a new runtime measurement with a new training of the same original network), you might remove the previous outputs with the `--remove-output` argument, followed by the depth up to which to remove the outputs. 
There are three removal modes: - `a` (all: remove all outputs of the different tasks up to the given depth), @@ -98,29 +98,23 @@ flowchart TD A[CreateRuntimeConfig] --> B[MeasureRuntime] B --> |merge the results for different batch sizes| C[MergeRuntimes] C --> D[PlotRuntimes] - C --> E[PlotRuntimesMultipleCMSSW] - C --> F[PlotRuntimesMultipleNetworks] + C --> E[PlotRuntimesMultipleParams] ``` It is composed of four major types of tasks: -1. [CreateRuntimeConfig](#createruntimeconfig): This task creates the cmssw config file to run the inference, using a -json file for the model parameters. +1. [CreateRuntimeConfig](#createruntimeconfig): This task creates the cmssw config file to run the inference, using a json file for the model parameters. -2. [MeasureRuntime](#measureruntime): This task runs the network as many times as demanded in the arguments for a -single batch size and outputs a .csv file with the results of the timing measurements. +2. [MeasureRuntime](#measureruntime): This task runs the network as many times as demanded in the arguments for a single batch size and outputs a .csv file with the results of the timing measurements. -3. [MergeRuntimes](#mergeruntimes): This task merges the .csv output files with the required multiple batch sizes -from the [MeasureRuntime](#measureruntime) tasks to obtain a single .csv file containing the informations to plot. +3. [MergeRuntimes](#mergeruntimes): This task merges the .csv output files with the required multiple batch sizes from the [MeasureRuntime](#measureruntime) tasks to obtain a single .csv file containing the informations to plot. -4. [PlotRuntimes](#plotruntimes), [PlotRuntimesMultipleNetworks](#plotruntimesseveralnetworks), -[PlotRuntimesMultipleCMSSW](#plotruntimesmultiplecmssw): These tasks create the plots with the values stored in the -.csv file from [MergeRuntimes](#mergeruntimes). +4. 
[PlotRuntimes](#plotruntimes), [PlotRuntimesMultipleParams](#plotruntimesmultipleparams): These tasks create the plots with the values stored in the .csv file from [MergeRuntimes](#mergeruntimes). Calling the [PlotRuntimes](#plotruntimes) task triggers the whole pipeline with the correct arguments. The way to give the necessary informations about your model to MLProf is by using a json file. -Its structure is presented below in [Model file in json format](#model-file-in-json-format) +Its structure is presented below in [Model file in json format](#model-file-in-json-format). # Model file in json format @@ -128,7 +122,7 @@ The format of the file to give to MLProf is the following: ```json { - "file": "{absolute_path_to_you_pb_model_file}", + "file": "{path_to_your_pb_model_file}", "inputs": [ { "name": "{name_input_layer_1}", @@ -149,35 +143,34 @@ The format of the file to give to MLProf is the following: }, ... ], - "network_name": "{name_of_the_network_for_the_legend_of_the_plots_and_the_name_of_the_output_pdf_of_PlotRuntimesMultipleNetworks}" + "inference_engine": "{name_of_inference_engine_(either_tf_or_onnx)}", + "network_name": "{optional_name_of_the_network_for_the_labels}" } ``` -There are already a few examples of these configswith working paths for the networks in the "examples" folder. +There are already a few examples of these configs with working paths for the networks in the "examples" folder. # CreateRuntimeConfig -This task create the CMSSW config file to run the inference in the corresponding task, using the template file in -the `cmssw/MLProf/RuntimeModule/test/` directory. -The parameters of the inference except the batch sizes are fixed by the created -configuration file, therefore this task will be run again for every change in the inference (e.g. the number of runs for the statistics, the path to the graph to check...). 
+This task creates the CMSSW config file to run the inference in the corresponding task, using the template file in the `cmssw/MLProf/RuntimeModule/test/` directory. +The parameters of the inference except the batch sizes are fixed by the created configuration file, therefore this task will be run again for every change in the inference (e.g. the number of runs for the statistics, the path to the graph to check...). ## Parameters: -- model-file: str. The absolute path of the json file containing the informations of the model to be tested. default: `$MLP_BASE/examples/model1/model.json`. +- model-file: str. The absolute path of the json file containing the information of the model to be tested. default: `$MLP_BASE/examples/simple_dnn/model.json`. -- model-name: str. When set, use this name for the path used when storing outputs instead of a hashed version of `--model-file```. default: empty. +- model-name: str. When set, use this name for the path used when storing outputs instead of a hashed version of `--model-file`. default: empty. -- input-files: str. Comma-separated list of absolute paths of input files for the CMSSW analyzer (TODO: not implemented); when empty, random input values will be used. default: empty. +- model-label: str. When set, use this string for the model label in the plots from the plotting tasks. When empty, the `network_name` field in the model json data is used when existing, and model-name otherwise. default: empty. -- events: int. The number of events to read from each input file for averaging measurements. default: `1`. +- n-events: int. The number of events to read from each input file for averaging measurements. default: `1`. -- repetitions: int. The number of repetitions to be performed per evaluation for averaging. default: `100`. +- n-calls: int. The number of calls to be performed per evaluation for averaging. default: `100`. -- warmup: int. The number of evaluations to be performed before starting the actual measurement.
default: `10`. +- input-type: str. Define the kind of inputs used during the measurement, either 'random', 'incremental', 'zeros', or a path to a root file. default: random -- cmssw-version: str. The CMSSW version used for the inference. default: `CMSSW_12_2_4`. +- cmssw-version: str. The CMSSW version used for the inference. default: `CMSSW_13_3_1`. -- scram-arch: str. The SCRAM architecture used for the inference. default: `slc7_amd64_gcc10`. +- scram-arch: str. The SCRAM architecture used for the inference. default: `slc7_amd64_gcc12`. ## Output: @@ -188,17 +181,17 @@ configuration file, therefore this task will be run again for every change in th ```shell law run CreateRuntimeConfig \ --version test_simple_dnn \ - --model-file "$MLP_BASE/examples/model1/model.json" \ + --model-file "$MLP_BASE/examples/simple_dnn/model.json" \ --model-name dnn \ - --repetitions 500 \ - --cmssw-version CMSSW_12_2_4 + --n-calls 500 \ + --cmssw-version CMSSW_13_3_1 ``` # MeasureRuntime Task to provide the time measurements of the inference of a network in CMSSW, given the input parameters and a single batch size. -The batch size and the (```repetitions * events```) measured values in milliseconds are saved in csv format. +The batch size and the (```n-calls * n-events```) measured values in milliseconds are saved in csv format. ## Requires: @@ -208,265 +201,215 @@ The batch size and the (```repetitions * events```) measured values in milliseco - batch-size: int. the batch size to measure the runtime for. default: `1`. -- model-file: str. The absolute path of the json file containing the informations of the model to be tested. default: `$MLP_BASE/examples/model1/model.json`. +- model-file: str. The absolute path of the json file containing the informations of the model to be tested. default: `$MLP_BASE/examples/simple_dnn/model.json`. - model-name: str. When set, use this name for the path used when storing outputs instead of a hashed version of `--model-file`. default: empty. 
-- input-files: str. comma-separated list of absolute paths of input files for the CMSSW analyzer (TODO: not implemented). When empty, random input values will be used. default: empty. +- model-label: str. When set, use this string for the model label in the plots from the plotting tasks. When empty, the `network_name` field in the model json data is used when existing, and model-name otherwise. default: empty. -- events: int. The number of events to read from each input file for averaging measurements. default: `1`. +- n-events: int. The number of events to read from each input file for averaging measurements. default: `1`. -- repetitions: int. The number of repetitions to be performed per evaluation for averaging. default: `100`. +- n-calls: int. The number of calls to be performed per evaluation for averaging. default: `100`. -- warmup: int. The number of evaluations to be performed before starting the actual measurement. default: `10`. +- input-type: str. Define the kind of inputs used during the measurement, either 'random', 'incremental', 'zeros', or a path to a root file. default: random -- cmssw-version: str. The CMSSW version used for the inference. default: `CMSSW_12_2_4`. +- cmssw-version: str. The CMSSW version used for the inference. default: `CMSSW_13_3_1`. -- scram-arch: str. The SCRAM architecture used for the inference. default: `slc7_amd64_gcc10`. +- scram-arch: str. The SCRAM architecture used for the inference. default: `slc7_amd64_gcc12`. ## Output: -- `runtime_bs_{batch-size}.csv```: The batch size and measured values of the runtime +- `runtime_bs_{batch-size}.csv`: The batch size and measured values of the runtime for each repetition and event. 
## Example: ```shell law run MeasureRuntime --version test_simple_dnn \ - --model-file $MLP_BASE/examples/model1/model.json \ + --model-file $MLP_BASE/examples/simple_dnn/model.json \ --model-name dnn \ - --repetitions 500 \ - --cmssw-version CMSSW_12_2_4 \ + --n-calls 500 \ + --cmssw-version CMSSW_13_3_1 \ --batch-size 1 ``` # MergeRuntimes -This task merges the .csv output files with the required multiple batch sizes -from the different occurences of the `MeasureRuntime` task to obtain a single .csv -file containing the informations to plot. +This task merges the .csv output files with the required multiple batch sizes from the different occurrences of the `MeasureRuntime` task to obtain a single .csv file containing the information to plot. ## Requires: - The .csv files from the several occurences of `MeasureRuntime` (one for each batch size). ## Parameters: -- batch-sizes: int. The comma-separated list of batch sizes to be tested; default: `1,2,4```. +- batch-sizes: int. The comma-separated list of batch sizes to be tested; default: `1,2,4`. -- model-file: str. The absolute path of the json file containing the informations of the model to be tested. - default: `$MLP_BASE/examples/model1/model.json```. +- model-file: str. The absolute path of the json file containing the information of the model to be tested. default: `$MLP_BASE/examples/simple_dnn/model.json`. -- model-name: str. When set, use this name for the path used when storing outputs instead of a hashed version of - `--model-file```. default: empty. +- model-name: str. When set, use this name for the path used when storing outputs instead of a hashed version of `--model-file`. default: empty. -- input-files: str. comma-separated list of absolute paths of input files for the CMSSW analyzer (TODO: not implemented); - when empty, random input values will be used; default: empty +- model-label: str. When set, use this string for the model label in the plots from the plotting tasks.
When empty, the `network_name` field in the model json data is used when existing, and model-name otherwise. default: empty. -- events: int. The number of events to read from each input file for averaging measurements. default: `1``` +- n-events: int. The number of events to read from each input file for averaging measurements. default: `1`. -- repetitions: int. The number of repetitions to be performed per evaluation for averaging. default: `100``` +- n-calls: int. The number of calls to be performed per evaluation for averaging. default: `100`. -- warmup: int. The number of evaluations to be performed before starting the actual measurement. - default: `10``` +- input-type: str. Define the kind of inputs used during the measurement, either 'random', 'incremental', 'zeros', or a path to a root file. default: random -- cmssw-version: str. The CMSSW version used for the inference. default: `CMSSW_12_2_4``` +- cmssw-version: str. The CMSSW version used for the inference. default: `CMSSW_13_3_1` -- scram-arch: str. The SCRAM architecture used for the inference. default: `slc7_amd64_gcc10``` +- scram-arch: str. The SCRAM architecture used for the inference. default: `slc7_amd64_gcc12` ## Output: -- `runtime_bs_{batch_size_1}_{batch_size_2}_{...}.csv```: The batch size and measured values of the runtime -for each repetition and event in the several measurements. +- `runtime_bs_{batch_size_1}_{batch_size_2}_{...}.csv`: The batch size and measured values of the runtime for each repetition and event in the several measurements. 
## Example: ```shell law run MergeRuntimes --version test_simple_dnn \ - --model-file $MLP_BASE/examples/model1/model.json \ + --model-file $MLP_BASE/examples/simple_dnn/model.json \ --model-name dnn \ - --repetitions 500 \ - --cmssw-version CMSSW_12_2_4 \ + --n-calls 500 \ + --cmssw-version CMSSW_13_3_1 \ --batch-sizes 1,2,4,8,16,32,64,128,256,512,1024 ``` # PlotRuntimes -This task plots the results of the runtime measurement against the given batch sizes. The points are -given by the median of the data series and the boundaries of the uncertainty bands are given by the 16 and -84 percentiles of the data series (Therefore the uncertainty band contains 68% of the data points, -which corresponds to a $1\sigma$ uncertainty for gaussian uncertainties). The number of inferences behind one -plotted data point is given by `events * repetitions```. +This task plots the results of the runtime measurement against the given batch sizes. +The points are given by the median of the data series and the boundaries of the uncertainty bands are given by the 16 and 84 percentiles of the data series (Therefore the uncertainty band contains 68% of the data points, which corresponds to a $1\sigma$ uncertainty for gaussian uncertainties). +The number of inferences behind one plotted data point is given by `n-events * n-calls`. ## Requires: - The .csv file from the `MergeRuntimes` task. ## Parameters: -- log-y: bool. Plot the y-axis values logarithmically; default: `False```. +- log-y: bool. Plot the y-axis values logarithmically; default: `False`. + +- bs-normalized: bool. Normalize the measured values with the batch size before plotting; default: `True`. -- bs-normalized: bool. Normalize the measured values with the batch size before plotting; default: `True```. +- filling: bool. Plot the errors as error bands instead of error bars; default: `True`. -- filling: bool. Plot the errors as error bands instead of error bars; default: `True```. +- top-right-label: str. 
When set, stick this string as label over the top right corner of the plot. default: empty. + +- batch-sizes: int. The comma-separated list of batch sizes to be tested; default: `1,2,4`. + +- model-file: str. The absolute path of the json file containing the informations of the model to be tested. default: `$MLP_BASE/examples/simple_dnn/model.json`. + +- model-name: str. When set, use this name for the path used when storing outputs instead of a hashed version of `--model-file`. default: empty. -- batch-sizes: int. The comma-separated list of batch sizes to be tested; default: `1,2,4```. +- model-label: str. When set, use this string for the model label in the plots from the plotting tasks. When empty, the `network_name` field in the model json data is used when existing, and model-name otherwise. default: empty. -- model-file: str. The absolute path of the json file containing the informations of the model to be tested. - default: `$MLP_BASE/examples/model1/model.json```. +- n-events: int. The number of events to read from each input file for averaging measurements. default: `1`. -- model-name: str. When set, use this name for the path used when storing outputs instead of a hashed version of - `--model-file```. default: empty. +- n-calls: int. The number of calls to be performed per evaluation for averaging. default: `100`. -- input-files: str. comma-separated list of absolute paths of input files for the CMSSW analyzer (TODO: not implemented); - when empty, random input values will be used; default: empty +- input-type: str. Define the kind of inputs used during the measurement, either 'random', 'incremental', 'zeros', or a path to a root file. default: random -- events: int. The number of events to read from each input file for averaging measurements. default: `1``` +- cmssw-version: str. The CMSSW version used for the inference. default: `CMSSW_13_3_1` -- repetitions: int. The number of repetitions to be performed per evaluation for averaging. 
default: `100``` +- scram-arch: str. The SCRAM architecture used for the inference. default: `slc7_amd64_gcc12` -- warmup: int. The number of evaluations to be performed before starting the actual measurement. - default: `10``` +- file_types: str. Comma-separated types of the output plot files. default: "pdf". + +- plot_postfix: str. An arbitrary postfix that is added with two underscores to all paths of produced plots. default: empty. + +- view_cmd: str. A command to execute after the task has run to visualize plots right in the terminal (e.g. "imgcat" if installed). default: empty. -- cmssw-version: str. The CMSSW version used for the inference. default: `CMSSW_12_2_4``` -- scram-arch: str. The SCRAM architecture used for the inference. default: `slc7_amd64_gcc10``` ## Output: -- `runtime_plot_different_batch_sizes_{batch_size_1}_{batch_size_2}_{...}.pdf```: The plot of the runtime measurement -against the different batch sizes given. +- `runtime_plot_different_batch_sizes_{batch_size_1}_{batch_size_2}_{...}.pdf`: The plot of the runtime measurement against the different batch sizes given. ## Example: ```shell law run PlotRuntimes --version test_simple_dnn \ - --model-file $MLP_BASE/examples/model1/model.json \ + --model-file $MLP_BASE/examples/simple_dnn/model.json \ --model-name dnn \ - --repetitions 500 \ - --cmssw-version CMSSW_12_2_4 \ + --model-label "dnn with CMSSW_13_3_1" \ + --n-calls 500 \ + --cmssw-version CMSSW_13_3_1 \ --batch-sizes 1,2,4,8,16,32,64,128,256,512,1024 \ --log-y False \ --bs-normalized True ``` -# PlotRuntimesMultipleNetworks +# PlotRuntimesMultipleParams -This task plots the results of the runtime measurement against the given batch sizes for several models. -The model-files argument is required and replaces the module-file argument. 
The points are -given by the median of the data series and the boundaries of the uncertainty bands are given by the 16 and -84 percentiles of the data series (Therefore the uncertainty band contains 68% of the data points, -which corresponds to a $1\sigma$ uncertainty for gaussian uncertainties). The number of inferences behind one -plotted data point is given by `events * repetitions```. +This task plots the results of the runtime measurement against the given batch sizes for several values of the given parameters. +The model-files argument is required and replaces the model-file argument. +The cmssw-versions argument replaces the cmssw-version arguments, but both have the same default. +The scram-archs argument replaces the scram-arch argument, but both have the same default. +These arguments accept brace expansions, see below in the Example subsection. +The model-names and model-labels arguments replace respectively the model-name and model-label arguments and are empty by default. +The points are given by the median of the data series and the boundaries of the uncertainty bands are given by the 16 and 84 percentiles of the data series (Therefore the uncertainty band contains 68% of the data points, which corresponds to a $1\sigma$ uncertainty for gaussian uncertainties). +The number of inferences behind one plotted data point is given by `n-events * n-calls`. ## Requires: -- The .csv file from the `MergeRuntimes` task. +- The .csv files from the `MergeRuntimes` task. ## Parameters: -- model-files: str. The comma-separated list of the absolute paths of the json files containing the - informations of the model to be tested. No default value. - -- log-y: bool. Plot the y-axis values logarithmically; default: `False```. - -- bs-normalized: bool. Normalize the measured values with the batch size before plotting; default: `True```. - -- filling: bool. Plot the errors as error bands instead of error bars; default: `True```. - -- batch-sizes: int.
The comma-separated list of batch sizes to be tested; default: `1,2,4```. - -- model-name: str. When set, use this name for the path used when storing outputs instead of a hashed version of - `--model-file```. default: empty. +- model-files: str. The comma-separated list of the absolute paths of the json files containing the informations of the model to be tested. No default value. -- input-files: str. comma-separated list of absolute paths of input files for the CMSSW analyzer (TODO: not implemented); - when empty, random input values will be used; default: empty +- cmssw-versions: str. The comma-separated list of CMSSW versions used for the inference. default: `CMSSW_13_3_1` -- events: int. The number of events to read from each input file for averaging measurements. default: `1``` +- scram-archs: str. The comma-separated list of SCRAM architectures used for the inference. default: `slc7_amd64_gcc12` -- repetitions: int. The number of repetitions to be performed per evaluation for averaging. default: `100``` +- model-names: str. The comma-separated list of model names. When set, use these names for the path used when storing outputs instead of a hashed version of `--model-file`. default: empty. -- warmup: int. The number of evaluations to be performed before starting the actual measurement. - default: `10``` +- model-labels: str. The comma-separated list of model labels. When set, use these strings for the model labels in the plots from the plotting tasks. When empty, the `network_name` fields in the models json data is used when existing, and model-names otherwise. default: empty. -- cmssw-version: str. The CMSSW version used for the inference. default: `CMSSW_12_2_4``` +- log-y: bool. Plot the y-axis values logarithmically; default: `False`. -- scram-arch: str. The SCRAM architecture used for the inference. default: `slc7_amd64_gcc10``` +- bs-normalized: bool. Normalize the measured values with the batch size before plotting; default: `True`. 
-## Output: -- `runtime_plot_networks_{network_name_1}_{network_name_2}_{...}_different_batch_sizes_{batch_size_1}_{batch_size_2}_{...}.pdf```: The plot of the runtime measurement -against the different batch sizes given. - -## Example: +- top-right-label: str. When set, stick this string as label over the top right corner of the plot. default: empty. -```shell -law run PlotRuntimesMultipleNetworks --version test_several_networks \ - --model-files $MLP_BASE/examples/model1/model.json,$MLP_BASE/examples/cnn/model_cnn.json\ - --repetitions 500 \ - --cmssw-version CMSSW_12_2_4 \ - --batch-sizes 1,2,4,8,16,32,64,128,256,512,1024 \ - --log-y False \ - --bs-normalized True -``` +- filling: bool. Plot the errors as error bands instead of error bars; default: `True`. -# PlotRuntimesMultipleCMSSW +- batch-sizes: int. The comma-separated list of batch sizes to be tested; default: `1,2,4`. -This task plots the results of the runtime measurement against the given batch sizes for several CMSSW versions. -The cmssw-versions argument replaces the cmssw-version argument. The cmssw-versions argument accepts brace expansions, -see below in the Example subsection. The points are -given by the median of the data series and the boundaries of the uncertainty bands are given by the 16 and -84 percentiles of the data series (Therefore the uncertainty band contains 68% of the data points, -which corresponds to a $1\sigma$ uncertainty for gaussian uncertainties). The number of inferences behind one -plotted data point is given by `events * repetitions```. +- n-events: int. The number of events to read from each input file for averaging measurements. default: `1` -## Requires: -- The .csv file from the `MergeRuntimes` task. - -## Parameters: -- cmssw-versions: str. The comma separated list of CMSSW version used for the inference. - default: `"CMSSW_12_2_4","CMSSW_12_2_2"``` +- n-calls: int. The number of calls to be performed per evaluation for averaging. default: `100` -- log-y: bool. 
Plot the y-axis values logarithmically; default: `False```. +- input-type: str. Define the kind of inputs used during the measurement, either 'random', 'incremental', 'zeros', or a path to a root file. default: random. -- bs-normalized: bool. Normalize the measured values with the batch size before plotting; default: `True```. +- file_types: str. Comma-separated types of the output plot files. default: "pdf". -- filling: bool. Plot the errors as error bands instead of error bars; default: `True```. +- plot_postfix: str. An arbitrary postfix that is added with two underscores to all paths of produced plots. default: empty. -- batch-sizes: int. The comma-separated list of batch sizes to be tested; default: `1,2,4```. - -- model-file: str. The absolute path of the json file containing the informations of the model to be tested. - default: `$MLP_BASE/examples/model1/model.json```. - -- model-name: str. When set, use this name for the path used when storing outputs instead of a hashed version of - `--model-file```. default: empty. - -- input-files: str. comma-separated list of absolute paths of input files for the CMSSW analyzer (TODO: not implemented); - when empty, random input values will be used; default: empty - -- events: int. The number of events to read from each input file for averaging measurements. default: `1``` - -- repetitions: int. The number of repetitions to be performed per evaluation for averaging. default: `100``` - -- warmup: int. The number of evaluations to be performed before starting the actual measurement. - default: `10``` - -- scram-arch: str. The SCRAM architecture used for the inference. default: `slc7_amd64_gcc10``` +- view_cmd: str. A command to execute after the task has run to visualize plots right in the terminal (e.g. "imgcat" if installed). default: empty. 
## Output: -- `runtime_plot__multiple_cmssw_{cmssw_version_1}_{cmssw_version_2}_{...}_different_batch_sizes_{batch_size_1}_{batch_size_2}_{...}.pdf```: The plot of the runtime measurement -against the different batch sizes given. +- `runtime_plot_networks_{param_1}_{param_2}_{...}_different_batch_sizes_{batch_size_1}_{batch_size_2}_{...}.pdf`: The plot of the runtime measurements against the different batch sizes given. ## Example: ```shell -law run PlotRuntimesMultipleCMSSW --version test_multiple_cmssw \ - --cmssw-versions CMSSW_12_2_4,CMSSW_12_2_2 \ - --model-file $MLP_BASE/examples/model1/model.json \ - --repetitions 500 \ - --batch-sizes 1,2,4,8,16,32,64,128,256,512,1024 \ - --log-y False +law run PlotRuntimesMultipleParams --version test_several_networks \ + --model-files $MLP_BASE/examples/simple_dnn/model.json,$MLP_BASE/examples/simple_dnn/model_onnx.json\ + --model-names "dnn","dnn_onnx"\ + --model-labels "dnn","dnn onnx"\ + --cmssw-versions CMSSW_13_3_1,CMSSW_13_2_4 \ + --n-calls 500 \ + --batch-sizes 1,2,4,8,16,32,64,128,256,512,1024 \ + --log-y False \ + --bs-normalized True \ + --top-right-label "$\sqrt{s}=13.6$ TeV" ``` equivalent to the brace expanded version: ```shell -law run PlotRuntimesMultipleCMSSW --version test_multiple_cmssw \ - --cmssw-versions "CMSSW_12_2_{2,4}" \ - --model-file $MLP_BASE/examples/model1/model.json \ - --repetitions 500 \ - --batch-sizes 1,2,4,8,16,32,64,128,256,512,1024 \ - --log-y False +law run PlotRuntimesMultipleParams --version test_several_networks \ + --model-files "$MLP_BASE/examples/{simple_dnn/model,simple_dnn/model_onnx}.json"\ + --model-names "dnn","dnn_onnx"\ + --model-labels "dnn","dnn onnx"\ + --cmssw-versions "CMSSW_13_{3_1,2_4}" \ + --n-calls 500 \ + --batch-sizes 1,2,4,8,16,32,64,128,256,512,1024 \ + --log-y False \ + --bs-normalized True \ + --top-right-label "$\sqrt{s}=13.6$ TeV" ``` - diff --git a/mlprof/tasks/runtime.py b/mlprof/tasks/runtime.py index f1befa0..4658695 100644 --- 
a/mlprof/tasks/runtime.py +++ b/mlprof/tasks/runtime.py @@ -277,9 +277,8 @@ def __init__(self, *args, **kwargs): # create list of labels to plot self.params_product_params_to_write = [ - combination_dict[key_to_write] + tuple([combination_dict[key_to_write] for key_to_write in self.params_to_write_labels]) for combination_dict in self.labels_products_dict - for key_to_write in self.params_to_write_labels ] def flatten_tuple(self, value): From aa7b1222c16108399eb241fc2b6cc1eca717a541 Mon Sep 17 00:00:00 2001 From: Nathan Prouvost Date: Tue, 6 Feb 2024 16:48:04 +0100 Subject: [PATCH 08/12] update docu #2 --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index d53e744..9465fa8 100644 --- a/README.md +++ b/README.md @@ -41,8 +41,7 @@ flowchart TD A[CreateRuntimeConfig] --> B[MeasureRuntime] B --> |merge the results for different batch sizes| C[MergeRuntimes] C --> D[PlotRuntimes] - C --> E[PlotRuntimesMultipleCMSSW] - C --> F[PlotRuntimesMultipleNetworks] + C --> E[PlotRuntimesMultipleParams] ``` A task is run with the command `law run` followed by the name of the task. @@ -85,7 +84,6 @@ law run PlotRuntimes --version test_mlprof --print-output 0 This tools uses the c++ `` library for runtime measurements and (soon) [IgProf](https://igprof.org/) for the memory profiling. It allows for the measurement of TensorFlow graphs with several input layers. -The inputs can be up to 3 dimensional. As this tool is set to work in CMSSW, it requires a frozen graph (it is recommended to use the cmsml [save_graph](https://cmsml.readthedocs.io/en/latest/api/tensorflow.html#cmsml.tensorflow.save_graph) function with the argument "True" for variables_to_constant). 
From f9ee8c7d1dc1e3791fd547a905779ae8fb12ffd2 Mon Sep 17 00:00:00 2001 From: Nathan Prouvost Date: Tue, 6 Feb 2024 16:56:13 +0100 Subject: [PATCH 09/12] update docu #3 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9465fa8..7adbd39 100644 --- a/README.md +++ b/README.md @@ -83,8 +83,8 @@ law run PlotRuntimes --version test_mlprof --print-output 0 ## Profiling This tools uses the c++ `` library for runtime measurements and (soon) [IgProf](https://igprof.org/) for the memory profiling. -It allows for the measurement of TensorFlow graphs with several input layers. -As this tool is set to work in CMSSW, it requires a frozen graph (it is recommended to use the cmsml [save_graph](https://cmsml.readthedocs.io/en/latest/api/tensorflow.html#cmsml.tensorflow.save_graph) function with the argument "True" for variables_to_constant). +It allows for the measurement of TensorFlow graphs (.pb) and ONNX models (.onnx) with several input layers. +As this tool is set to work in CMSSW, it requires a frozen graph for TensorFlow models (it is recommended to use the cmsml [save_graph](https://cmsml.readthedocs.io/en/latest/api/tensorflow.html#cmsml.tensorflow.save_graph) function with the argument "True" for variables_to_constant). 
## Runtime measurement From a1bd97953af37fe14cd09430dfcae3f8083c99a2 Mon Sep 17 00:00:00 2001 From: Nathan Prouvost Date: Tue, 26 Mar 2024 13:32:09 +0100 Subject: [PATCH 10/12] change formatter to yaml, denest lengths checks in MultiModelParameter, add super and inheritance to object in Model --- mlprof/tasks/parameters.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mlprof/tasks/parameters.py b/mlprof/tasks/parameters.py index f825e31..9c800e4 100644 --- a/mlprof/tasks/parameters.py +++ b/mlprof/tasks/parameters.py @@ -12,10 +12,12 @@ from mlprof.tasks.base import BaseTask -class Model(): +class Model(object): def __init__(self, model_file: str, name, label, **kwargs): + super().__init__(**kwargs) + self.model_file = model_file self.name = name self.label = label @@ -26,7 +28,7 @@ def __init__(self, model_file: str, name, label, **kwargs): @property def data(self): if self._data is None: - self._data = law.LocalFileTarget(self.model_file).load(formatter="json") + self._data = law.LocalFileTarget(self.model_file).load(formatter="yaml") return self._data @property @@ -189,15 +191,13 @@ def __init__(self, *args, **kwargs): # check that lengths match if initialized if self.model_names[0] == law.NO_STR: - if self.model_labels[0] != law.NO_STR: - if len(self.model_files) != len(self.model_labels): - raise ValueError("the length of model_files and model_labels muss be the same") + if (self.model_labels[0] != law.NO_STR) and (len(self.model_files) != len(self.model_labels)): + raise ValueError("the lengths of model_files and model_labels must be the same") elif self.model_labels[0] == law.NO_STR: if len(self.model_files) != len(self.model_names): - raise ValueError("the length of model_files and model_names muss be the same") - else: - if len({len(self.model_files), len(self.model_names), len(self.model_labels)}) != 1: - raise ValueError("the length of model_names, model_files and model_labels muss be the same") + raise 
ValueError("the lengths of model_files and model_names must be the same") + elif len({len(self.model_files), len(self.model_names), len(self.model_labels)}) != 1: + raise ValueError("the lengths of model_names, model_files and model_labels must be the same") # if not initialized, change size objects for them to match if len(self.model_names) != len(self.model_files): From 8781e78c9417fe2f88167de033d3acda5a3df864 Mon Sep 17 00:00:00 2001 From: Marcel R Date: Wed, 27 Mar 2024 18:22:14 +0100 Subject: [PATCH 11/12] Refactor plugin setup. --- .../ONNXRuntimeModule/plugins/BuildFile.xml | 6 - .../test/onnx_runtime_template_cfg.py | 64 ----------- .../RuntimeMeasurement/plugins/BuildFile.xml | 25 ++++- .../plugins/ONNXInference.cc} | 91 ++++++---------- .../plugins/{TFRuntime.cpp => TFInference.cc} | 103 +++++++++--------- .../test/onnx_runtime_template_cfg.py | 63 +++++++++++ .../test/tf_runtime_template_cfg.py | 33 +++--- cmssw/MLProf/Utils/BuildFile.xml | 5 + cmssw/MLProf/Utils/interface/utils.h | 27 +++-- cmssw/install_sandbox.sh | 3 + examples/cnn/model.json | 16 --- examples/dnn_2_inputs/model.json | 20 ---- examples/simple_dnn/model.json | 16 --- mlprof/tasks/parameters.py | 24 ++-- mlprof/tasks/runtime.py | 11 +- 15 files changed, 232 insertions(+), 275 deletions(-) delete mode 100644 cmssw/MLProf/ONNXRuntimeModule/plugins/BuildFile.xml delete mode 100644 cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py rename cmssw/MLProf/{ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp => RuntimeMeasurement/plugins/ONNXInference.cc} (67%) rename cmssw/MLProf/RuntimeMeasurement/plugins/{TFRuntime.cpp => TFInference.cc} (67%) create mode 100644 cmssw/MLProf/RuntimeMeasurement/test/onnx_runtime_template_cfg.py create mode 100644 cmssw/MLProf/Utils/BuildFile.xml delete mode 100644 examples/cnn/model.json delete mode 100644 examples/dnn_2_inputs/model.json delete mode 100644 examples/simple_dnn/model.json diff --git 
a/cmssw/MLProf/ONNXRuntimeModule/plugins/BuildFile.xml b/cmssw/MLProf/ONNXRuntimeModule/plugins/BuildFile.xml deleted file mode 100644 index d807b98..0000000 --- a/cmssw/MLProf/ONNXRuntimeModule/plugins/BuildFile.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - diff --git a/cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py b/cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py deleted file mode 100644 index 1724a65..0000000 --- a/cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding: utf-8 - -import FWCore.ParameterSet.Config as cms -from FWCore.ParameterSet.VarParsing import VarParsing - -# setup minimal options -options = VarParsing("python") -options.register( - "batchSizes", - [1], - VarParsing.multiplicity.list, - VarParsing.varType.int, - "Batch sizes to be tested", -) -options.register( - "csvFile", - "results.csv", - VarParsing.multiplicity.singleton, - VarParsing.varType.string, - "The path of the csv file to save results", -) -options.parseArguments() - - -# define the process to run -process = cms.Process("MLPROF") - -# minimal configuration -process.load("FWCore.MessageService.MessageLogger_cfi") -process.MessageLogger.cerr.FwkReport.reportEvery = 1 -process.maxEvents = cms.untracked.PSet( - input=cms.untracked.int32(__N_EVENTS__), # noqa -) -process.source = cms.Source( - "PoolSource", - fileNames=cms.untracked.vstring(*__INPUT_FILES__), # noqa -) - -# process options -process.options = cms.untracked.PSet( - allowUnscheduled=cms.untracked.bool(True), - wantSummary=cms.untracked.bool(False), -) - -# setup options for multithreaded -process.options.numberOfThreads=cms.untracked.uint32(1) -process.options.numberOfStreams=cms.untracked.uint32(0) -process.options.numberOfConcurrentLuminosityBlocks=cms.untracked.uint32(1) - - -# setup MyPlugin by loading the auto-generated cfi (see MyPlugin.fillDescriptions) -process.load("MLProf.ONNXRuntimeModule.onnxRuntimePlugin_cfi") 
-process.onnxRuntimePlugin.graphPath = cms.string("__GRAPH_PATH__") -process.onnxRuntimePlugin.inputTensorNames = cms.vstring(__INPUT_TENSOR_NAMES__) # noqa -process.onnxRuntimePlugin.outputTensorNames = cms.vstring(__OUTPUT_TENSOR_NAMES__) # noqa -process.onnxRuntimePlugin.outputFile = cms.string(options.csvFile) -process.onnxRuntimePlugin.inputType = cms.string("__INPUT_TYPE__") -process.onnxRuntimePlugin.inputRanks = cms.vint32(__INPUT_RANKS__) # noqa -process.onnxRuntimePlugin.flatInputSizes = cms.vint32(__FLAT_INPUT_SIZES__) # noqa -process.onnxRuntimePlugin.batchSize = cms.int32(options.batchSizes[0]) -process.onnxRuntimePlugin.nCalls = cms.int32(__N_CALLS__) # noqa - -# define what to run in the path -process.p = cms.Path(process.onnxRuntimePlugin) diff --git a/cmssw/MLProf/RuntimeMeasurement/plugins/BuildFile.xml b/cmssw/MLProf/RuntimeMeasurement/plugins/BuildFile.xml index 5fe6592..14e5d4a 100644 --- a/cmssw/MLProf/RuntimeMeasurement/plugins/BuildFile.xml +++ b/cmssw/MLProf/RuntimeMeasurement/plugins/BuildFile.xml @@ -1,6 +1,21 @@ - - - - + + + + - + + + + + + + + + + + + + + + + diff --git a/cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp b/cmssw/MLProf/RuntimeMeasurement/plugins/ONNXInference.cc similarity index 67% rename from cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp rename to cmssw/MLProf/RuntimeMeasurement/plugins/ONNXInference.cc index cc4f804..61c0720 100644 --- a/cmssw/MLProf/ONNXRuntimeModule/plugins/ONNXPluginRuntime.cpp +++ b/cmssw/MLProf/RuntimeMeasurement/plugins/ONNXInference.cc @@ -1,6 +1,5 @@ /* - * Example plugin to demonstrate the direct multi-threaded inference with ONNX - * Runtime. + * Plugin to measure the inference runtime of an onnx model. 
*/ #include @@ -16,24 +15,24 @@ #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/Framework/interface/stream/EDAnalyzer.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "MLProf/Utils/interface/utils.h" #include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h" +#include "MLProf/Utils/interface/utils.h" + using namespace cms::Ort; -class ONNXRuntimePlugin - : public edm::stream::EDAnalyzer> { - public: - explicit ONNXRuntimePlugin(const edm::ParameterSet &, const ONNXRuntime *); - static void fillDescriptions(edm::ConfigurationDescriptions &); +class ONNXInference : public edm::stream::EDAnalyzer> { +public: + explicit ONNXInference(const edm::ParameterSet&, const ONNXRuntime*); + ~ONNXInference(){}; - static std::unique_ptr initializeGlobalCache( - const edm::ParameterSet &); - static void globalEndJob(const ONNXRuntime *); + static void fillDescriptions(edm::ConfigurationDescriptions&); + static std::unique_ptr initializeGlobalCache(const edm::ParameterSet&); + static void globalEndJob(const ONNXRuntime*); - private: +private: void beginJob(); - void analyze(const edm::Event &, const edm::EventSetup &); + void analyze(const edm::Event&, const edm::EventSetup&); void endJob(); inline float drawNormal() { return normalPdf_(rndGen_); } @@ -60,8 +59,7 @@ class ONNXRuntimePlugin FloatArrays inputArrays_; // each stream hosts its own data }; -void ONNXRuntimePlugin::fillDescriptions( - edm::ConfigurationDescriptions &descriptions) { +void ONNXInference::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { // defining this function will lead to a *_cfi file being generated when // compiling edm::ParameterSetDescription desc; @@ -89,12 +87,9 @@ void ONNXRuntimePlugin::fillDescriptions( descriptions.addWithDefaultLabel(desc); } -ONNXRuntimePlugin::ONNXRuntimePlugin(const edm::ParameterSet &iConfig, - const ONNXRuntime *cache) - : inputTensorNames_( - iConfig.getParameter>("inputTensorNames")), - outputTensorNames_( - 
iConfig.getParameter>("outputTensorNames")), +ONNXInference::ONNXInference(const edm::ParameterSet& iConfig, const ONNXRuntime* cache) + : inputTensorNames_(iConfig.getParameter>("inputTensorNames")), + outputTensorNames_(iConfig.getParameter>("outputTensorNames")), outputFile_(iConfig.getParameter("outputFile")), inputTypeStr_(iConfig.getParameter("inputType")), inputRanks_(iConfig.getParameter>("inputRanks")), @@ -107,34 +102,28 @@ ONNXRuntimePlugin::ONNXRuntimePlugin(const edm::ParameterSet &iConfig, normalPdf_(0.0, 1.0) { // the number of input ranks must match the number of input tensors if ((int)inputRanks_.size() != nInputs_) { - throw cms::Exception("InvalidInputRanks") - << "number of input ranks must match number of input tensors"; + throw cms::Exception("InvalidInputRanks") << "number of input ranks must match number of input tensors"; } // the input must be at least 1 dimensional for (auto rank : inputRanks_) { if (rank < 1) { - throw cms::Exception("InvalidRank") - << "only ranks above 0 are supported, got " << rank; + throw cms::Exception("InvalidRank") << "only ranks above 0 are supported, got " << rank; } } // the sum of ranks must match the number of flat input sizes - if (std::accumulate(inputRanks_.begin(), inputRanks_.end(), 0) != - (int)flatInputSizes_.size()) { + if (std::accumulate(inputRanks_.begin(), inputRanks_.end(), 0) != (int)flatInputSizes_.size()) { throw cms::Exception("InvalidFlatInputSizes") - << "sum of input ranks must match number of flat input sizes, got " - << flatInputSizes_.size(); + << "sum of input ranks must match number of flat input sizes, got " << flatInputSizes_.size(); } // batch size must be positive if (batchSize_ < 1) { - throw cms::Exception("InvalidBatchSize") - << "batch sizes must be positive, got " << batchSize_; + throw cms::Exception("InvalidBatchSize") << "batch size must be positive, got " << batchSize_; } // input sizes must be positive for (auto size : flatInputSizes_) { if (size < 1) { - throw 
cms::Exception("InvalidInputSize") - << "input sizes must be positive, got " << size; + throw cms::Exception("InvalidInputSize") << "input sizes must be positive, got " << size; } } // check the input type @@ -146,15 +135,13 @@ ONNXRuntimePlugin::ONNXRuntimePlugin(const edm::ParameterSet &iConfig, inputType_ = mlprof::InputType::Zeros; } else { throw cms::Exception("InvalidInputType") - << "input type must be either 'incremental', 'zeros' or 'random', got " - << inputTypeStr_; + << "input type must be either 'incremental', 'zeros' or 'random', got " << inputTypeStr_; } // initialize the input_shapes array with inputRanks_ and flatInputSizes_ int i = 0; for (auto rank : inputRanks_) { - std::vector input_shape(flatInputSizes_.begin() + i, - flatInputSizes_.begin() + i + rank); + std::vector input_shape(flatInputSizes_.begin() + i, flatInputSizes_.begin() + i + rank); input_shape.insert(input_shape.begin(), batchSize_); input_shapes_.push_back(input_shape); i += rank; @@ -167,26 +154,20 @@ ONNXRuntimePlugin::ONNXRuntimePlugin(const edm::ParameterSet &iConfig, } } -std::unique_ptr ONNXRuntimePlugin::initializeGlobalCache( - const edm::ParameterSet &iConfig) { - return std::make_unique( - edm::FileInPath(iConfig.getParameter("graphPath")) - .fullPath()); +std::unique_ptr ONNXInference::initializeGlobalCache(const edm::ParameterSet& iConfig) { + return std::make_unique(edm::FileInPath(iConfig.getParameter("graphPath")).fullPath()); } -void ONNXRuntimePlugin::globalEndJob(const ONNXRuntime *cache) {} +void ONNXInference::globalEndJob(const ONNXRuntime* cache) {} -void ONNXRuntimePlugin::analyze(const edm::Event &iEvent, - const edm::EventSetup &iSetup) { +void ONNXInference::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { for (int i = 0; i < nInputs_; i++) { - std::vector &group_data = inputArrays_[i]; + std::vector& group_data = inputArrays_[i]; // fill the input for (int i = 0; i < (int)group_data.size(); i++) { - group_data[i] = - inputType_ == 
mlprof::InputType::Incremental - ? float(i) - : (inputType_ == mlprof::InputType::Zeros ? float(0) - : drawNormal()); + group_data[i] = inputType_ == mlprof::InputType::Incremental + ? float(i) + : (inputType_ == mlprof::InputType::Zeros ? float(0) : drawNormal()); } } @@ -195,16 +176,14 @@ void ONNXRuntimePlugin::analyze(const edm::Event &iEvent, // pre calls to "warm up" for (int r = 0; r < nPreCalls_; r++) { - outputs = globalCache()->run(inputTensorNames_, inputArrays_, input_shapes_, - outputTensorNames_, batchSize_); + outputs = globalCache()->run(inputTensorNames_, inputArrays_, input_shapes_, outputTensorNames_, batchSize_); } // actual calls to measure runtimes std::vector runtimes; for (int r = 0; r < nCalls_; r++) { auto start = std::chrono::high_resolution_clock::now(); - outputs = globalCache()->run(inputTensorNames_, inputArrays_, input_shapes_, - outputTensorNames_, batchSize_); + outputs = globalCache()->run(inputTensorNames_, inputArrays_, input_shapes_, outputTensorNames_, batchSize_); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration runtime_in_seconds = (end - start); runtimes.push_back(runtime_in_seconds.count() * 1000); @@ -214,4 +193,4 @@ void ONNXRuntimePlugin::analyze(const edm::Event &iEvent, mlprof::writeRuntimes(outputFile_, batchSize_, runtimes); } -DEFINE_FWK_MODULE(ONNXRuntimePlugin); +DEFINE_FWK_MODULE(ONNXInference); diff --git a/cmssw/MLProf/RuntimeMeasurement/plugins/TFRuntime.cpp b/cmssw/MLProf/RuntimeMeasurement/plugins/TFInference.cc similarity index 67% rename from cmssw/MLProf/RuntimeMeasurement/plugins/TFRuntime.cpp rename to cmssw/MLProf/RuntimeMeasurement/plugins/TFInference.cc index e0f2425..dbd56fa 100644 --- a/cmssw/MLProf/RuntimeMeasurement/plugins/TFRuntime.cpp +++ b/cmssw/MLProf/RuntimeMeasurement/plugins/TFInference.cc @@ -1,5 +1,5 @@ /* - * Plugin to measure the runtime of a tensorflow graph. + * Plugin to measure the inference runtime of a tensorflow model. 
*/ #include @@ -19,13 +19,12 @@ #include "MLProf/Utils/interface/utils.h" -class TFRuntime : public edm::stream::EDAnalyzer> { +class TFInference : public edm::stream::EDAnalyzer> { public: - explicit TFRuntime(const edm::ParameterSet&, const tensorflow::SessionCache*); - ~TFRuntime(){}; + explicit TFInference(const edm::ParameterSet&, const tensorflow::SessionCache*); + ~TFInference(){}; static void fillDescriptions(edm::ConfigurationDescriptions&); - static std::unique_ptr initializeGlobalCache(const edm::ParameterSet&); static void globalEndJob(const tensorflow::SessionCache*); @@ -44,7 +43,7 @@ class TFRuntime : public edm::stream::EDAnalyzer inputRanks_; std::vector flatInputSizes_; - std::vector batchSizes_; + int batchSize_; int nCalls_; // other members @@ -57,16 +56,16 @@ class TFRuntime : public edm::stream::EDAnalyzer TFRuntime::initializeGlobalCache(const edm::ParameterSet& params) { +std::unique_ptr TFInference::initializeGlobalCache(const edm::ParameterSet& params) { std::string graphPath = edm::FileInPath(params.getParameter("graphPath")).fullPath(); // cpu-only for now tensorflow::Options options{tensorflow::Backend::cpu}; return std::make_unique(graphPath, options); } -void TFRuntime::globalEndJob(const tensorflow::SessionCache* cache) {} +void TFInference::globalEndJob(const tensorflow::SessionCache* cache) {} -void TFRuntime::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { +void TFInference::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; // the path to the file containing the graph @@ -85,21 +84,21 @@ void TFRuntime::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { // (for a graph with a 1D and a 2D input tensor, this would be a vector of three values) desc.add>("flatInputSizes"); // batch sizes to test - desc.add>("batchSizes"); + desc.add("batchSize"); // the number of calls to the graph to measure the runtime desc.add("nCalls"); 
descriptions.addWithDefaultLabel(desc); } -TFRuntime::TFRuntime(const edm::ParameterSet& config, const tensorflow::SessionCache* cache) +TFInference::TFInference(const edm::ParameterSet& config, const tensorflow::SessionCache* cache) : inputTensorNames_(config.getParameter>("inputTensorNames")), outputTensorNames_(config.getParameter>("outputTensorNames")), outputFile_(config.getParameter("outputFile")), inputTypeStr_(config.getParameter("inputType")), inputRanks_(config.getParameter>("inputRanks")), flatInputSizes_(config.getParameter>("flatInputSizes")), - batchSizes_(config.getParameter>("batchSizes")), + batchSize_(config.getParameter("batchSize")), nCalls_(config.getParameter("nCalls")), nInputs_(inputTensorNames_.size()), nPreCalls_(10), @@ -122,10 +121,8 @@ TFRuntime::TFRuntime(const edm::ParameterSet& config, const tensorflow::SessionC << "sum of input ranks must match number of flat input sizes, got " << flatInputSizes_.size(); } // batch size must be positive - for (auto batchSize : batchSizes_) { - if (batchSize < 1) { - throw cms::Exception("InvalidBatchSize") << "batch sizes must be positive, got " << batchSize; - } + if (batchSize_ < 1) { + throw cms::Exception("InvalidBatchSize") << "batch size must be positive, got " << batchSize_; } // input sizes must be positive for (auto size : flatInputSizes_) { @@ -146,11 +143,11 @@ TFRuntime::TFRuntime(const edm::ParameterSet& config, const tensorflow::SessionC } } -void TFRuntime::beginJob() {} +void TFInference::beginJob() {} -void TFRuntime::endJob() {} +void TFInference::endJob() {} -tensorflow::Tensor TFRuntime::createInputTensor(int rank, std::vector shape) { +tensorflow::Tensor TFInference::createInputTensor(int rank, std::vector shape) { // convert the shape to a tf shape tensorflow::TensorShape tShape; for (auto dim : shape) { @@ -163,50 +160,48 @@ tensorflow::Tensor TFRuntime::createInputTensor(int rank, std::vector shape // fill it float* data = tensor.flat().data(); for (int i = 0; i < 
tensor.NumElements(); i++, data++) { - *data = inputType_ == mlprof::InputType::Incremental ? float(i) : - inputType_ == mlprof::InputType::Zeros ? float(0) : - drawNormal(); + *data = inputType_ == mlprof::InputType::Incremental ? float(i) + : inputType_ == mlprof::InputType::Zeros ? float(0) + : drawNormal(); } return tensor; } -void TFRuntime::analyze(const edm::Event& event, const edm::EventSetup& setup) { - for (int batchSize : batchSizes_) { - // prepare inputs - std::vector> inputs; - int sizeOffset = 0; - for (int i = 0; i < nInputs_; i++) { - // build the shape - std::vector shape = {batchSize}; - for (int j = 0; j < inputRanks_[i]; j++, sizeOffset++) { - shape.push_back(flatInputSizes_[sizeOffset]); - } - // create and save it - inputs.push_back({inputTensorNames_[i], createInputTensor(inputRanks_[i], shape)}); +void TFInference::analyze(const edm::Event& event, const edm::EventSetup& setup) { + // prepare inputs + std::vector> inputs; + int sizeOffset = 0; + for (int i = 0; i < nInputs_; i++) { + // build the shape + std::vector shape = {batchSize_}; + for (int j = 0; j < inputRanks_[i]; j++, sizeOffset++) { + shape.push_back(flatInputSizes_[sizeOffset]); } + // create and save it + inputs.push_back({inputTensorNames_[i], createInputTensor(inputRanks_[i], shape)}); + } - // prepare output vectors - std::vector outputs; - - // pre calls to "warm up" - for (int r = 0; r < nPreCalls_; r++) { - tensorflow::run(session_, inputs, outputTensorNames_, &outputs); - } + // prepare output vectors + std::vector outputs; - // actual calls to measure runtimes - std::vector runtimes; - for (int r = 0; r < nCalls_; r++) { - auto start = std::chrono::high_resolution_clock::now(); - tensorflow::run(session_, inputs, outputTensorNames_, &outputs); - auto end = std::chrono::high_resolution_clock::now(); - std::chrono::duration runtime_in_seconds = (end - start); - runtimes.push_back(runtime_in_seconds.count() * 1000); - } + // pre calls to "warm up" + for (int r = 0; r < 
nPreCalls_; r++) { + tensorflow::run(session_, inputs, outputTensorNames_, &outputs); + } - // save them - mlprof::writeRuntimes(outputFile_, batchSize, runtimes); + // actual calls to measure runtimes + std::vector runtimes; + for (int r = 0; r < nCalls_; r++) { + auto start = std::chrono::high_resolution_clock::now(); + tensorflow::run(session_, inputs, outputTensorNames_, &outputs); + auto end = std::chrono::high_resolution_clock::now(); + std::chrono::duration runtime_in_seconds = (end - start); + runtimes.push_back(runtime_in_seconds.count() * 1000); } + + // save them + mlprof::writeRuntimes(outputFile_, batchSize_, runtimes); } -DEFINE_FWK_MODULE(TFRuntime); +DEFINE_FWK_MODULE(TFInference); diff --git a/cmssw/MLProf/RuntimeMeasurement/test/onnx_runtime_template_cfg.py b/cmssw/MLProf/RuntimeMeasurement/test/onnx_runtime_template_cfg.py new file mode 100644 index 0000000..5e5133e --- /dev/null +++ b/cmssw/MLProf/RuntimeMeasurement/test/onnx_runtime_template_cfg.py @@ -0,0 +1,63 @@ +# coding: utf-8 + +import FWCore.ParameterSet.Config as cms +from FWCore.ParameterSet.VarParsing import VarParsing + +# setup minimal options +options = VarParsing("python") +options.register( + "batchSize", + 1, + VarParsing.multiplicity.singleton, + VarParsing.varType.int, + "Batch sizes to be tested", +) +options.register( + "csvFile", + "results.csv", + VarParsing.multiplicity.singleton, + VarParsing.varType.string, + "The path of the csv file to save results", +) +options.parseArguments() + + +# define the process to run +process = cms.Process("MLPROF") + +# minimal configuration +process.load("FWCore.MessageService.MessageLogger_cfi") +process.MessageLogger.cerr.FwkReport.reportEvery = 1 +process.maxEvents = cms.untracked.PSet( + input=cms.untracked.int32(__N_EVENTS__), # noqa +) +process.source = cms.Source( + "PoolSource", + fileNames=cms.untracked.vstring(*__INPUT_FILES__), # noqa +) + +# process options +process.options = cms.untracked.PSet( + 
allowUnscheduled=cms.untracked.bool(True), + wantSummary=cms.untracked.bool(False), +) + +# multi-threading options +process.options.numberOfThreads = cms.untracked.uint32(1) +process.options.numberOfStreams = cms.untracked.uint32(0) +process.options.numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1) + +# setup the plugin +process.load("MLProf.RuntimeMeasurement.onnxInference_cfi") +process.onnxInference.graphPath = cms.string("__GRAPH_PATH__") +process.onnxInference.inputTensorNames = cms.vstring(__INPUT_TENSOR_NAMES__) # noqa +process.onnxInference.outputTensorNames = cms.vstring(__OUTPUT_TENSOR_NAMES__) # noqa +process.onnxInference.outputFile = cms.string(options.csvFile) +process.onnxInference.inputType = cms.string("__INPUT_TYPE__") +process.onnxInference.inputRanks = cms.vint32(__INPUT_RANKS__) # noqa +process.onnxInference.flatInputSizes = cms.vint32(__FLAT_INPUT_SIZES__) # noqa +process.onnxInference.batchSize = cms.int32(options.batchSize) +process.onnxInference.nCalls = cms.int32(__N_CALLS__) # noqa + +# define what to run in the path +process.p = cms.Path(process.onnxInference) diff --git a/cmssw/MLProf/RuntimeMeasurement/test/tf_runtime_template_cfg.py b/cmssw/MLProf/RuntimeMeasurement/test/tf_runtime_template_cfg.py index def47f7..8729df1 100644 --- a/cmssw/MLProf/RuntimeMeasurement/test/tf_runtime_template_cfg.py +++ b/cmssw/MLProf/RuntimeMeasurement/test/tf_runtime_template_cfg.py @@ -6,9 +6,9 @@ # setup minimal options options = VarParsing("python") options.register( - "batchSizes", - [1], - VarParsing.multiplicity.list, + "batchSize", + 1, + VarParsing.multiplicity.singleton, VarParsing.varType.int, "Batch sizes to be tested", ) @@ -42,17 +42,22 @@ wantSummary=cms.untracked.bool(False), ) +# multi-threading options +process.options.numberOfThreads = cms.untracked.uint32(1) +process.options.numberOfStreams = cms.untracked.uint32(0) +process.options.numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1) + # setup the plugin 
-process.load("MLProf.RuntimeMeasurement.tfRuntime_cfi") -process.tfRuntime.graphPath = cms.string("__GRAPH_PATH__") -process.tfRuntime.inputTensorNames = cms.vstring(__INPUT_TENSOR_NAMES__) # noqa -process.tfRuntime.outputTensorNames = cms.vstring(__OUTPUT_TENSOR_NAMES__) # noqa -process.tfRuntime.outputFile = cms.string(options.csvFile) -process.tfRuntime.inputType = cms.string("__INPUT_TYPE__") -process.tfRuntime.inputRanks = cms.vint32(__INPUT_RANKS__) # noqa -process.tfRuntime.flatInputSizes = cms.vint32(__FLAT_INPUT_SIZES__) # noqa -process.tfRuntime.batchSizes = cms.vint32(list(options.batchSizes)) -process.tfRuntime.nCalls = cms.int32(__N_CALLS__) # noqa +process.load("MLProf.RuntimeMeasurement.tfInference_cfi") +process.tfInference.graphPath = cms.string("__GRAPH_PATH__") +process.tfInference.inputTensorNames = cms.vstring(__INPUT_TENSOR_NAMES__) # noqa +process.tfInference.outputTensorNames = cms.vstring(__OUTPUT_TENSOR_NAMES__) # noqa +process.tfInference.outputFile = cms.string(options.csvFile) +process.tfInference.inputType = cms.string("__INPUT_TYPE__") +process.tfInference.inputRanks = cms.vint32(__INPUT_RANKS__) # noqa +process.tfInference.flatInputSizes = cms.vint32(__FLAT_INPUT_SIZES__) # noqa +process.tfInference.batchSize = cms.int32(options.batchSize) +process.tfInference.nCalls = cms.int32(__N_CALLS__) # noqa # define what to run in the path -process.p = cms.Path(process.tfRuntime) +process.p = cms.Path(process.tfInference) diff --git a/cmssw/MLProf/Utils/BuildFile.xml b/cmssw/MLProf/Utils/BuildFile.xml new file mode 100644 index 0000000..5d7e337 --- /dev/null +++ b/cmssw/MLProf/Utils/BuildFile.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/cmssw/MLProf/Utils/interface/utils.h b/cmssw/MLProf/Utils/interface/utils.h index 1cdf162..747fe93 100644 --- a/cmssw/MLProf/Utils/interface/utils.h +++ b/cmssw/MLProf/Utils/interface/utils.h @@ -1,3 +1,6 @@ +#ifndef MLPROF_UTILS_H +#define MLPROF_UTILS_H + /* * Helper functions. 
*/ @@ -10,18 +13,20 @@ namespace mlprof { -enum InputType { - Incremental, - Random, - Zeros, -}; + enum InputType { + Incremental, + Random, + Zeros, + }; -void writeRuntimes(const std::string& path, float batchSize, std::vector runtimes) { - std::ofstream file(path, std::ios::out | std::ios::app); - for (int i = 0; i < (int)runtimes.size(); i++) { - file << batchSize << "," << runtimes[i] << std::endl; + void writeRuntimes(const std::string& path, float batchSize, std::vector runtimes) { + std::ofstream file(path, std::ios::out | std::ios::app); + for (int i = 0; i < (int)runtimes.size(); i++) { + file << batchSize << "," << runtimes[i] << std::endl; + } + file.close(); } - file.close(); -} } // namespace mlprof + +#endif // MLPROF_UTILS_H diff --git a/cmssw/install_sandbox.sh b/cmssw/install_sandbox.sh index e80b33b..1503e4e 100644 --- a/cmssw/install_sandbox.sh +++ b/cmssw/install_sandbox.sh @@ -1,5 +1,8 @@ #!/bin/bash +# This script in executed in the $CMSSW_BASE/src directory after the initial "cmsenv" command and +# before "scram b" is called. + rm -rf MLProf cp -r "${MLP_BASE}/cmssw/MLProf" . 
rm -rf MLProf/*/test diff --git a/examples/cnn/model.json b/examples/cnn/model.json deleted file mode 100644 index 3c3e3f2..0000000 --- a/examples/cnn/model.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "file": "conv_2d_inputs.pb", - "inputs": [ - { - "name": "input_0_input", - "shape": [28,28,1] - } - ], - "outputs": [ - { - "name": "Identity" - } - ], - "network_name": "cnn", - "inference_engine": "tf" -} diff --git a/examples/dnn_2_inputs/model.json b/examples/dnn_2_inputs/model.json deleted file mode 100644 index d504b07..0000000 --- a/examples/dnn_2_inputs/model.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "file": "dnn_2_inputs.pb", - "inputs": [ - { - "name": "input_0", - "shape": [392] - }, - { - "name": "input_1", - "shape": [392] - } - ], - "outputs": [ - { - "name": "Identity" - } - ], - "network_name": "dnn_2_inputs", - "inference_engine": "tf" -} diff --git a/examples/simple_dnn/model.json b/examples/simple_dnn/model.json deleted file mode 100644 index d424054..0000000 --- a/examples/simple_dnn/model.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "file": "simple_dnn.pb", - "inputs": [ - { - "name": "input_0", - "shape": [784] - } - ], - "outputs": [ - { - "name": "Identity" - } - ], - "network_name": "dnn", - "inference_engine": "tf" -} diff --git a/mlprof/tasks/parameters.py b/mlprof/tasks/parameters.py index 9c800e4..f3ffa29 100644 --- a/mlprof/tasks/parameters.py +++ b/mlprof/tasks/parameters.py @@ -18,7 +18,7 @@ def __init__(self, model_file: str, name, label, **kwargs): super().__init__(**kwargs) - self.model_file = model_file + self.model_file = os.path.abspath(os.path.expandvars(os.path.expanduser(model_file))) self.name = name self.label = label @@ -37,9 +37,8 @@ def full_name(self): return self.name # create a hash - model_file = os.path.expandvars(os.path.expanduser(self.model_file)) - name = os.path.splitext(os.path.basename(model_file))[0] - return f"{name}{law.util.create_hash(model_file)}" + name = os.path.splitext(os.path.basename(self.model_file))[0] + 
return f"{name}{law.util.create_hash(self.model_file)}" @property def full_model_label(self): @@ -61,8 +60,8 @@ class CMSSWParameters(BaseTask): """ cmssw_version = luigi.Parameter( - default="CMSSW_13_3_1", - description="CMSSW version; default: CMSSW_13_3_1", + default="CMSSW_13_3_3", + description="CMSSW version; default: CMSSW_13_3_3", ) scram_arch = luigi.Parameter( default="slc7_amd64_gcc12", @@ -129,9 +128,9 @@ class ModelParameters(BaseTask): """ model_file = luigi.Parameter( - default="$MLP_BASE/examples/simple_dnn/model.json", + default="$MLP_BASE/examples/simple_dnn/model_tf.json", description="json file containing information of model to be tested; " - "default: $MLP_BASE/examples/simple_dnn/model.json", + "default: $MLP_BASE/examples/simple_dnn/model_tf.json", ) model_name = luigi.Parameter( default=law.NO_STR, @@ -144,6 +143,15 @@ class ModelParameters(BaseTask): "used when existing, and full_name otherwise; default: empty", ) + @classmethod + def modify_param_values(cls, params) -> dict: + params = super().modify_param_values(params) + + if params.get("model_file"): + params["model_file"] = os.path.abspath(os.path.expandvars(os.path.expanduser(params["model_file"]))) + + return params + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) diff --git a/mlprof/tasks/runtime.py b/mlprof/tasks/runtime.py index 4658695..8245b38 100644 --- a/mlprof/tasks/runtime.py +++ b/mlprof/tasks/runtime.py @@ -45,7 +45,8 @@ def run(self): # resolve the graph path relative to the model file graph_path = os.path.expandvars(os.path.expanduser(model_data["file"])) - graph_path = os.path.join(os.path.dirname(self.model_file), graph_path) + model_file = os.path.expandvars(os.path.expanduser(self.model_file)) + graph_path = os.path.join(os.path.dirname(model_file), graph_path) # determine input files if self.input_file: @@ -81,7 +82,7 @@ def run(self): if model_data["inference_engine"] == "tf": template = 
"$MLP_BASE/cmssw/MLProf/RuntimeMeasurement/test/tf_runtime_template_cfg.py" elif model_data["inference_engine"] == "onnx": - template = "$MLP_BASE/cmssw/MLProf/ONNXRuntimeModule/test/onnx_runtime_template_cfg.py" + template = "$MLP_BASE/cmssw/MLProf/RuntimeMeasurement/test/onnx_runtime_template_cfg.py" else: raise Exception("The only inference_engine supported are 'tf' and 'onnx'") @@ -111,13 +112,13 @@ def requires(self): return CreateRuntimeConfig.req(self) def output(self): - return self.local_target(f"runtime_bs_{self.batch_size}.csv") + return self.local_target(f"runtime_bs{self.batch_size}.csv") def build_command(self): return [ "cmsRun", self.input().path, - f"batchSizes={self.batch_size}", + f"batchSize={self.batch_size}", f"csvFile={self.output().path}", ] @@ -131,7 +132,7 @@ def requires(self): ] def output(self): - return self.local_target(f"runtimes_bs_{self.batch_sizes_repr}.csv") + return self.local_target(f"runtimes_bs{self.batch_sizes_repr}.csv") def run(self): # merge files From 947d623519f086c705472e3610ed58f65056d2d4 Mon Sep 17 00:00:00 2001 From: Marcel R Date: Wed, 27 Mar 2024 18:26:30 +0100 Subject: [PATCH 12/12] Minor style change. --- cmssw/MLProf/RuntimeMeasurement/plugins/TFInference.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmssw/MLProf/RuntimeMeasurement/plugins/TFInference.cc b/cmssw/MLProf/RuntimeMeasurement/plugins/TFInference.cc index dbd56fa..c6c531f 100644 --- a/cmssw/MLProf/RuntimeMeasurement/plugins/TFInference.cc +++ b/cmssw/MLProf/RuntimeMeasurement/plugins/TFInference.cc @@ -160,9 +160,9 @@ tensorflow::Tensor TFInference::createInputTensor(int rank, std::vector sha // fill it float* data = tensor.flat().data(); for (int i = 0; i < tensor.NumElements(); i++, data++) { - *data = inputType_ == mlprof::InputType::Incremental ? float(i) - : inputType_ == mlprof::InputType::Zeros ? float(0) - : drawNormal(); + *data = inputType_ == mlprof::InputType::Incremental + ? 
float(i) + : (inputType_ == mlprof::InputType::Zeros ? float(0) : drawNormal()); } return tensor;