diff --git a/demo/tutorials/llm_notebooks/Visual_QA.ipynb b/demo/tutorials/llm_notebooks/Visual_QA.ipynb
new file mode 100644
index 000000000..7045e71c3
--- /dev/null
+++ b/demo/tutorials/llm_notebooks/Visual_QA.ipynb
@@ -0,0 +1 @@
+{"cells":[{"cell_type":"markdown","metadata":{"id":"D285OP467TeS"},"source":["![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAUgAAABcCAYAAAAMJCwKAAAgAElEQVR4nOy9f5gcZ3Xn+znnra5pjcfKZCyNfqDIQgghZMdxZMfGxpbbwhjM2g4h2Ak/Nol3Aw5xEsLu5eHh8vCofNl9uFluLhiwhUi4zib3ZomcZBMgARsjt4RxbGIritcSsiyE0GpleSQLMYxHPd1V59w/qnq6Z6ZnNJJG/Ej6+zw9PW911fueeqvq1Pn9CucASZJokkzZaudirC666KKLcwWZ+y4TveyWJeW4/lKZYYD5mI2m8+YdH61Wk3Tux+uiiy66ODeYYwaZaKUysNSI7xSVtfj4MCPi9t8WLhzY+sADt9fndswuuuiii3ODaO66ShQSM7lvvYj8B6A8/pMIiM4/evToTuDI3I3ZRRdddHHuMIcMMocgC9ysFwx3DBzVyFzCQBpF8VyP10UXXXRxrjDnDBJygdFyl4wiTS3egJPnYrguuuiii3MCPRedem57NHBk3A6pwLxzMVwXXXTRxTnBnEmQSZJ/xP2gaDjhrv00vTSigB12tVqSJNrcf/p+uiFBXXTRxY8ec+7Fvuqq+f1RT/ktgl40PogwbKn/XQgv7KhUsJwBJjNIr10G2UUXXfzocU7iICsV9AfnL4k5nG85//zYKpXv1pMksStv+uT8eKy0RtyWqU9U8U1cU5e9Mb17qtU7anNPWxdddNHF7HEOGOTUTJpKBa1UsC271kYLjh79zyL6bnefP3F4b5JzxLEPvrhw4Z/v7sZMdtFFFz9CnBMGORW5On1V5YLVsUT/CNJrlnXcUzXg+JfU7c5K5ehQ1x7ZRRdd/KhwTsJ8JqMpTW7dzlJc+swykBZ3HpcdAfcMkVAGLVerKHl8UBdddNHFDx3nJMxn2sHMFYrEmrbtPyQxtosuuujitPBDlSDXbwgqDo4grUTtCRJkF1100cWPC+aIQc4uZMdMLAhtzDH/lo7KdhdddNHFjxZzwCATXbuWCNZO8/sWBgdfUvhuCh75hN8mM8P2djfKp4suuvjR4iwYZKLXvq7/YrGeD7jbIBxF3NskyZZ/JTc9LkyBBdP5XNxBwETV8OwwcKJSwarVM6ewiy666OJscEb6bJIkWq0uXOkS/ptqaZ1ZSqsoxQxwU/f28J7Jxzil6LwnG/aDD2zf+rtbz4S2Lrrooou5whlLkCa+LmjP8ix9KXUkEloWxBm+TaTwnDsmok+L6iHcIxcxaBzP0h98bnvlxe1szetLnu0JdtFFF12cKc6YQbprjLgiolKECzXlwVN9Fz2kmdumyPyhNLhGmRhEI9XqnceongFzLIpg0A0s76KLLuYILQaZJAobIZFZMphsgnQ4W7g7ICaAqp2oXHfs4K5dREePthsnZ2BySdPOWS2+K5bTvLG5rcsgu+iiizlBziCTRyIWDpY5ursO5PnPic8QunM3ofgvZ46T2eSp2tB04iRJYkmSpDOmFCau44x77e6II3GZ0s+U0bEyvq+PTc/2Ic8tw5fGJL5l9ky+iy666GJ65AxyydJVuN7OYh/lM88OIQwjz42QygjKMJ6OYlajhzqhd5Q7qFPJO/Ai7Lv5fx7VOHO7CfdZZPJsPtwLe9fxmb2D4H286IuJWYTqAvS8BbgsRmwAGCTL9gFb5mhuuuiii3/lyBlkqsuZN+8OsvogIaqhOgqhRikbJUtHca2TpaM0pE5afzBJNn5m/bb7VGkP8p74/3TtcSapBhODIjvDvj9I+fy7kbCGtF7GrBfPYtwUc8vXd3AIEdC5AEYXXXTRxZkgZ5Alt9yg6BH1sX5gfsHbNOdnriBQ7jVOvpRWqH72rHVYY3bGSytFNBqLkXSQrFFInN70hBffbmiYZYdddNFFF7NDIUECJcgZjy
tNxtiEA7iRpYqQTu2mubPMsi2AIGKz5LMCmOKmHeMtu3yxiy66OAeI2v6eIthbirVlRGGyq3imlMHJ7bbM60ICzMuatSrsTlmXRrFZqeNddNFFF3OIXEXtIBNOz5CauvfZQ0TqANXqRH47qyK5XYbZRRddnGNMlCDbMUWY7MyR2r3Ys4XjiKC4r61UPnMQsrJpi0lm+olDpfTE4Wo16cS6p6Gviy666GJuMZE1+mTD4/RcyFWsGcRzOpCWAKogHzGyjwATdPbg8QF06d2Vyv2fn75WRbc0WhdddHFuMclJAy3GM7lG4xSHSwp5QLa7W3uwT4t1easHkem1cqHVrWMi0XIXeY9Qa/LHtmOno+cnH801wydt6wa9d9HFjwgdVOxTOVya8N2W1YdE4wXi2YxH5BFERidm5u75/sVPDmAZIEsta/QC9YnHdex9GhrPHJ2YVbH9HDCsRG+6aaCvWg29k3+pVDanlcrzx//lMMr2eW2d08SVMP+lnOuPEdoz485Vptnk7LvTHSdxhbvJ04anw91nXm+hSV87XaeYl4kqdrsXe4oGOy7iWZWKVbJtu2HwfZlnG8VZPC1RCuLgbgMg/ePVfMaHLAZpfakI5gBxTOvHSUzwHGrY0zHHczXWU08tKZ8YyX4f918uwt5VwAwipfF0tbrkvUmS/EQzyZwBJkYClSo6NFRELly0FtjNll1Q1P+05vz/JJ9vF2eARGxqrYV2VIqaC8nE9ONT9lvUmWj2u2VXG9/bDbuHLO+bKf1Ob4OcUqpxIiOrVLAk+e2HIdl62WVLykuXTkfd8wCcGB78UAjRfzCrRyAzVBGapTR4jpjjbbdtiavVY+sybIUIRhaADIJHiB4DHprrMYeGxqK4HF6uIbrYLVMpXgiRBixr1EulenzKTn5skWilglarS/qvrty7LFTlNSby6gWLfJkg/Rw7rrB4FOG4kR1av97/6aGq7CXWw5VKcnxGR10Xs8Omb61A9l0OGXhQPv2tnfzOq/fOWf/JIxFLll2CPbsq3yCK6yj3f2c7d7z8xCmP37Ir5lhpGZEuxp5dCroAedl8JJQR78ElxTmJ7x0G389nnjuI7B0i8eP5+DMwysSVnzown/i5FaitI7rwSk74UpA+xFPcj7P0woPw3C42P/c0YfcBEj/R7HN6RuU+KS6yybgKKRVyzpwk9tRTjD711LQUKsC111nqba6Yyd7vZnvWPvEp9J09KpUkOjR8qC/WeXeKh7fnGToOLghR5GZPcg4Y5Lx5wTL31C2z3BSRM0jLR09H53rAHwKaUmC1urA3w25Q4ZYS4Ro3WyUiKqJ4YcMW0DyyIeBqtZLqARq+AwY/BTz+Iz2Rn2Q0JSd/7mpCuAejTKlkYB8C5oZBJolywZJBotIHSeVW8BSIEB2hkd4BfKHJJzof78rRby9nXvmjZI31CPNxi0GLpBAthCEDF0PCMCE6hNsOFu39Mg39exIfmZZJLn52HRq/DS29kbSxGhFFFEQUHBzDHUxSotJBTP+SZbs/1mSSE+MgRVpSZJP5TG5PqEp2ahWoZVcquivY38QCFq32KVleJ/rm0ATZM3aeQkCQCCd2J3aIEVVkJsn37CCtOyEPgZrgiPrJxBe/uKScuX44aM/HwX8NfBU47hlmDSyr5x+r45ZinoEQ46zGeKuJLYcfrsnjXxaaaqUoqhEiMVEMOoPD9ExQ0lVIuJjcfFYGIkLUj+hNwKn5hKS9qCwDGaD5rIWIfBGWDDzL81OiHiWEftzW4PZOeno/TmQbedm+pR2rj21+9hqi8iZEfhv31WgUIZr32RiDtFgJQRVEIpxVGOsIvdOo2DBVahxvnzkXShL42rai+0nGw9MNE+pM31w7aQzM8WbON27F2+aHgJ9873zTrnre+endIfT8dpaNxTiKoHnWapvtuWi3NRRxQ+WAethd9Ne1RZ4NJrAOn7uKqYkra3dHHLN1pPXlxeJTxRgZmN/A//vcfN75yuHpO7kb5J2FFJ
fm6cRwgKzxNwj/E6eGiaLWh6SvxFmPllbgBo2xBcQ9v0Wj3s/CAx8i8aFxO+aSfZcS9XycrL4OMyOUFLLDGF/CfRduI0BMlr4c90twW8d5fQsYPvY1vvuq4dxZNNmL3ZTOxnmYTGqfBQwIs+lqMmMYyw+cvEs7fXMNV/WiMlBLqJbTZ+b/SrFlF9HCkfR3Qii/O01PxiIStU+d5Kq1tiWdGoKKY/nLCEXYWS8xVKkkUdcOORdwxl/ycyk/vhAW0Ft+HZmVUVXS9CuUoktxHyREqxitryfxvwdmthU26z3kmtROTD7KC684NuWY+7/TT73+a2j0XsxXkDViSvHtZNn/4MIDnyHxlEXfHsDlA5hdipmhoY5nW8jC3bzn5QemjJ24sujAcn7w4luw7AtTnTQT4iCZJtJnbpjDqXtpqdo5q+yZ0OrYyU+usNUBk+M8f7JQLOi2lhDdlqVjfcJEdU5EUxE9CLbHPT3miKlIHxIGUF2M23KgTJb+c2znDXdXtpwrTHSyzgkSMe57bjlZdmmxxRC/n6h0F5ktQAOkfhNUv0Jy/Wm85DwizSKuQ0naH+674bsrhlny/B+TvZQSlT5CI+1HrZcQ3sBIbQtUh5CfWUccX06jDhqBsJVG9hGGXnFw2kLgL6w4SCL/9+TNp1Gs4sxQVAxXhe+rBMuQIrB8qoMGwAUTFBEZcer5pJ6qNNo5oHvSALPeczycZdK24vuslZvJ/Z+q79kEn7diECfHJZ4+vdUqmrpfEcxX57p06zeRAOJfERu7B0r76uXGcM+YGMRlPOuzLBuUwKVo6UqX8Pj1679bb94/pzqHs6F5ch/5N0yOx5yu/5lspDPRM/m4TmOeaozZn2+bdjgXKnYzHCYK1yC6ODdLZUOkPEpmr8eya8hSRaPXMPiy5SR+4LTjIrdhU45JNirPL6mx8MBfo+k7CKXX5GdkawjxAi5ccZyxxsWk9aW4QVwe4eTI3zH0qoP58dPQMA3j7BzmM9lDfJYe4yRJ7NprP/Gwp/V3hKh86cyKtqu51zJPv9DosSPAYO5JnkRnRw/73KEps+aUztx/O5NKinbTNzXl+5QPcbOo8ERUq2iSJIz3P8n5Nf3DO3176kOXKLPstxOSJNEvPzHQW66Fi9ysb9zmSG6gcLNhj/QDgeN7Ad5wVf6oVquMAMe2b0/23XbbliePHv3eFqE80hw3/y5oSzoO3U7EeJhFqyrU7BaBa55ra15a85Mk01/D6embpRNz/LgZmanl3uDmhsljnQpzrJWMMxq/CRUgMpxvsqh+jO/V/wcS1fAsJu5dRnbychLZf0rypqDDGlOJ5PNwdOMQS57bQ6nnNaR1cPqwrJ8fSMw8/Rncy+ApwgjoPujAbDuez0RMVLHbvdhNJjQeG3l2TOjrX//9pyuVe/+NWe0t7lZkjDTvvxZt4sFcbU9w2f7El39vhJvfNJinNLbR1ZG+uUXrwW6Xb6dWLE+SRLfsWhsNHj0yuH7Dp1bLtvCaRwivuA4WQBY/4jricOhasn/m2vt2fPnL6QFg+HSlnaEh9KuP9i+9Juu5YSty5XUbfCnmPLJN9nuWfSPL0scrleRwXhkp77dS2bQiwy/11FJVVVOxrdsye+3rP7Xz9a998UheZm7higy9/LrruQp0BdssAj3yCPbPlcq926vV3j1JktRnS2vISmURHURzb7XguIuJBpzs4Ne/dmRPMXPtqvN43xddtDtNkuRYs33ZZZt7zz+/foUZ860qputVATz69KEXLxh8ZvDobhsbmz9fe3rWbt2u16x3+XnB5rNBRrZW/cA1lU8+GNGzE5ITM9kyK5UkeuihRQPr19+76pFtevl118urcJaSe2VrW6scuZb0Wat86tFqNT5QqeT9VSr3l2H0cjMbaNJnKqbmCvcc2779vY91GqvOwou3bpPl11TMqIKuV0313oOPVe/aOXX/+8uZ1i6Rbb6Y9cWEVc2iikZZ+OTer3/t93af+so0X/fMnQ3yvj
2X4H4NaUMRMdz/jtsvqrP52R2E6ABuq0nTAcRfxyef+wrHV00fjnMmj7Fbffx/kTpRGOWkKm5Riy+IgkzJUJstpqYaTpYUJ4f7nAWq1buOAPedar9WDF2HHzvSdy6NkNImQU50FiVJol/9av+yhfHRm116flHcLgcGkOZNEEAEcVdcUonCgbLKX1+74dN/Ua0e250kSZ0OaB9RALFQvmBwwVvUone523rRkN/iWkjiwm9GpWg7LL4HfusrkEuYW7dlG5Tojzx4DUHVzUTiUW003l+tLvxLM26UEL1PsHUQehGseY754pPRPhi9p1rt2wIc60DqjBhfkUhcPU9HXXbttYMXv+51Q8/kNHZUVydsmzcvW+we/YEIl6q4oYCLikd/0//9F38XLlhe6gn/HuRmcVla1CzNRxZXNfl3HvE3kl2wqVJJdnZikle94Y8HsrGxDaUe/SWMG9xYIKoTGEkeiqcaiR5w2Oos+KvLLttchXqvubwHid6q5PSpuEnQ2C3aWakkV7WPmSSJfvUbFwyW0ujDbtnNiqSIqASNStjDwE3ttFUqj0Rp2LU8ePRRd7+6SZO6mmsoq/EeYBYMsg1z5cVWuYFSOSIdM5BDYE8CUPf9SGMvImuwFOLyJdjoCrj7mbkZeCMs291PI1pNVoTqiB7ETx6j96U6dv4xJKQgkGXzwS7jwgMPkST1001TnL4e5GScczvfRJyWLekcO2m8k/yfJFqtXrA6RPGnIPrP4De4eb+54Vkzxq+BZ3XcU8AjsJUov68S3Zux4M1ffGpJOZfiOp9MMeWxpPZOJXwUZL27q2f1vN+sgWcNwMuOvxENH69U7nvNuBqdaU01KEgZJ0aIVUOs7ksz+A2Nev4Q/Grce90LWpv9muFuKyF8xCj/1k03fXL+bOIR43qtbm7H3a3wSkPLbCD9ov7Rr1YHr9iya+2kJYc7I4rE0JCiGmHEOLEEjZQwX+q22qV0r4j+O5ylbpm25iWPrQTvF5O3u0QfzbKB1ZP7r1TuXRzX7UMq0cfBf9VhgWOYNcav43if7ubmy8F/TSW+5/zz7feGFv70sKg+JSKG5/RhRSygyKpG44LBibdNYpr5MlFdKSqtawORO5dWKpsXTKRvm6mzGMIyEYnHx4AyeE1cpkioM6KIvT4rJIly/3f6gdcXy6AoIjtI64dJXHnx+SHcniCKR4EU95WIrJ05x7oN0wljSaLjtsK0VKHUs5YsNZAU9ypmx3j+sjruu4ii44hAWu8lKr2Z2tjVrL0tym2ns4+rzXecHObzI8aPX9zb1HmpVC9YnRE2icrNbul890wR0yYrLbJFtJ25upu6W+yZXy4e/vC8kcbNUyWacS++uhuOrBb0P7r7cstSLVxammcESB5bKK7uZu7Zmgzf+NBDixbkc+i1PI7eQUxx1KwRu8htKuH95o1lZinuZjjmbX2Cq3umjs8XLb3rByd1PcwmaPv7I0L2zyI6MjHeFXAzRG6MNHzugqGhjZXKp9aQd2rkJocpfTcaYybjBUscxNUtU7N0tbr/IcgVbhYVvNha8yKKgONq1oiRaL2WSu+f2HuirtHHReTd7tni/HwzBVcBXFAR1bbzUMSa46+QEH9w4dDQ73iWPSOqRxAMseJ6ZIjo/FJJV7aGK87RwnJ3W+qeX5e2/QfNGmsLm2lrPlJdhtsCt2J/DNEA5nvghT0zX49JmCsnTb1+MaXyGiw1oEaWfoOFHM+LSVyfYjwOHMctIksHiEpXMbCvb+blpAtMJ4s1+cLi564h6vkAWTqAqqL6NHbyAY4+MAoYFu3A/BmcCDMQ1hJKH+NY/MbChpnHSs6Clok7zCgl/ngwz444x8JtK+snI0kSrVQ2rXDCx1R0vecXILeL5a/nVELphIjsNfc9IcRDImEiE/RMRWWxEG2+9nX3XXLyZKaTw2HGz0noBe/L/1VUo1SQnKG17SqCmmdpFHpeE+L0LUmSqKnXJ3QoqHtWBrnULFuGmZL3aaKKeM
s+JCKIiLplkWe2LEjpjmp14eBkp087kiSxSgUT9+2CPi46yd6UF0lWz7I1IcT/u0v0j9dtuO/Prq3c9+bXfnXJsi1b1kaTmWSppOZNHWe80ImD+EoRvcIsNQRVVUSDFT/bhIQrcfWsHrn7r61ff+/VkOhll23uXV8Z/AOV8KtZNtYLFo2fN2IaolGVsB9nt4TosGioC0W/goJFWVbrDaXeD6Csc2cvIupe3C3uphppBs0QGBLy1Etcf8GzbAGeL4ZXVLMy1aAeqOQ25MSqVbRaXdiL+s+6Zf15VpxAca+4yN9Xq0n6Q800ShKF65RM14MMgqRE8X5UHmf32nSciVn9ScZGnyaKQQKIVuixaSs2FCgW4ZMyJZayaPEyNn1rBfftXcnmZ9fw2b03sOQ7mwjRf8fSy9EIgj6O1d/LnWt35IxPjLtW7SPLPkb5vL2okku5cimBv+Wz+/8rn917Awt3D0JVT8UoO8dBdsT0XChx1yLwfE6QnKtyTKeBiT5yz62CrrlDRl+8WQjXFA/nuKoooiaqO71R36QavknGaCb1derhXaJhvVsWk8cwqVlmqqV+Se0DIZTeZ3gqjk728I8nZmrY75buMOe4qi4vJKeBPPOkuZdHZo35SrjuoccW/XUkmRVse1IuRe52EpW6oI+aNQ4gUtYQXeKWXTJZzc+7tyvAlkFy5NRe4Rf3Zb7gc0HjNe4sds90vB6ooI5hWcMQ6ROJ3i6kb45i/+bCRcf/qlod+AJwqOmpbzTESrGk3kZ38yxwN5HIVGSve7bTzU5I0NWIrMOy/lawQ26nVonVqN8CyWPnnffpimjp7WluP8sZjjuCGnAo8+xz5tnfSxSOq9sKcf6tiLzV3fpaHmGP0sbYAkF/CU+HNET1jCxu7w+4qDlfCfDahs0v9ZTWuhvuaZt06nlMs8vP33LL5t4vfvH5WrWKXX2j9pbSsAo3xX2cRvdsGPWvz3wXT4OzYqcb4WX7FuPhKtJ6nKuxjd00xiZ6qe+6aIRNzz6I6M1kYyC6CgmXksie6SvxCGCgcjla2gyhmTgQgffhtpigfWQpwGG88RUyPs6RVROl6MSVIzzEon0fpjzvD2iMrSgkXSPSd5Lpmyj1PsqSpV9G9lQ5fGR/EfIwTbmzM1GxN26EJOETu04ul2dH3+S/IhHuhoQzn37PDAKf+NWxR39/Tc/TZ9zPHKAV4tPGpAQbPHpk0CX+JfD5tN9qriYiJ9wb/3HDhmOPNjfv2rX20JEXXzyo5veAXOHuxUPratYwDfE1sTQuMbfc09tWetidIutEdpqnH80auj2ObbQRxgaiLHqnavR+t6y/RbXg5mgUrQhZulhdzCfFIgKIYwh1N/usRX5P5DIE9ahhsiYS+SOQi/OiGQV7dVPQxYJeDDyZJFPDh5oowmSoVuVLnjUGRMNHRaI+LyQ9mhlJuRqf21CFPjeviMrlaPn69Rs+/alq9dhjlQo0GuDixaJtE9ITTTQC829CfaNQ3yk6r4bbYkPuFA3vxrK+1jUS3DMQW1epbF7gkv0i7oMTcyDERMOwe/qpejn77BNfPj5S/HCgUhnYax56VUu3uzVyVb4ZDKa6yiwbVbeaIHFz3twzcF9dqfzU/GolGSZJrFTZNGDua5quxXH2KCi5mr36e99rLAP2QWKa3dcHvpKiDB5Cs97CHjLfe0axn2cjfiRibPrWKuKe1aR1I4pr1Eef4OjQMZKLWiXDAHTvw2SNEZBeNJSx7A3A508dD6n9aLSu+D9/EIpsXxr1lHweTiD+jwhD42M2+22mG76w6i9Z8u06qncRxVcDZRpjIKEfsVuReAORfpNFS/8W+/W/hOTI5MIas3fStIjPaSharqzE5f0CH0T0g4h/UNo+p9NG9QOi9gF3W3c6FJ17FGxSvJYSLnbzy3MnRpukpaqI/7Xasceq1evG4yIvumh3uviCC3YiPCAhGqG4PXMV1k1hIHO7HogmhDMB4KYhOu6SbQr0fimOXzherR
wd/cbDJw6JN+7DssdEI9zb46QwdwZClg20r/Mz3qNDblPXrZbJPVE2dLBaPToK3x95fWXom5h/yt1TL9TUNptqZMgrZjNbuap9dHRkJPoTJ/tdYK+GWIubfeI5NhklmbpZn3t2q0rPPSkL3ghAb/uuzZNonoupB7sbjldh5ESlcnQUjh5Q5L+CPENbFXvH86ElLDUdW6caX+JmOm4eaaq41tiRxvqnN13ZZI5JEat5/DCBexxLc2bbJMrVzfpBBtzTWq5mA1DYFcNSiBZX8pU71Sxbi2XL3QxcwN3cyRMn3Ey1NKAlXdOkO8p8qbstd2tZs91NPfUdUDsx1ck3C5ypCJO4cv93yki4nLS+vAinOU4WHodKEaeZaDOPmedX78PZQVTKGZzZhsK5MzM8HSUdO0ha309aP0BaP0jWOIGIUe6NCAFCWM28+R/B5HMsfnbdxFqStOIan/+fX6KR3oll7ydLdxL1KFFJMQNPe0nTDcTzPkKJTWzad3F+bMtkMdFJMytPdfHMFXMgSorIqED+cUZo+0xoU7RpfSb9PuowKh3X3v7hYrKKXbzv64peJyrz80IWkjNJF3PLhh17II+N22btQc4PPLA7bbhvxX1IhOYDhLtoljV6Bb8cvJ/2cnCOiahmWX3Ig26tVr9br1aTwsaTWLX6vhMmfFk1dApk70uRPjWxKdIjmCg1cftiFA0drFQo+kvSJEksy6wqovtVWyFN7m6ImogOMkskSWK33PJ8bfsjd/1pGuQNZul/EtHdGnpG8WAgaev9InnxCnE1y2K37OJI40/Bomva+2wG0DuF9CiyY/vWux6qVpO0SX+lgp1/vu53T3eIaJ2mKNw80r2XNLrW8pTGCVCNMOVvH3voPUNF8HdxbP7/9q13PYbzpIQSTAjeFVWVsjsHRQPgzegzk1CanyKrxvcN4ToJIXYc1Qjwb6roweZS9OY+X+DSSmWccV+C+4LcOQOCpqLhmEn29Wrl+8OTVwSdHs2XPGcnQY6MDRDF16MaUeqBsZM7iE7sbDk/ig9AIinIA2SZkaVQ6lnOWHrD9J27FXRuh3Ataf3nSMd+lpPRzxHkZ2nUr4lUAr8AACAASURBVOXkS/8HIjuAlNEf9FMq3Uyp9//js/tvnVJkNxEjuT5l6JUHOLzyM8ThtaT1X6Y+9nlK8UE0GGZG/eR8gt5KpA+y6G2Xw8ZxJjnNu8QnqduT2y2IuYGnhtfBUnJ5tPPH2769rQ0pWNGWVPxUl3ASPefAf9SxSyNCfDWiJmBN+5yoIqqHTfwAdPbC+1jPQbf0cBFnaOMrO4orooOO9I+rn+MQBEZcs1pnlVYONetHTiyI45GgEaRtFq6m1wIDHcnwY3n17ok9RlGoC+SFSGWCGwiE0yrc25yHbzx858Ht1aGN4v4rno19VFQeEo0Oi2hK4RgaL3snglmmDstd+DCjcVSYGZjw2hJBjCPFSBPu48sue76myAtISPPzLc5B8nMQZRVu88enq/g2S8F9GtNOPoaITPrdEcFAyiqyF3dEirAmwRR6BVlRrWJr1xLltlyMgkE6uh2V/VLEznrWKLv5RbCkH8Al/KxoZDhWOHNURA+QsTe/dKeTauhn96wkYvREK/BsXe5gQlGG8f71fGbPGyd8Fu99I5959k14I8ZtBFFDxBC/iS27TnEfSUqqdY6uHeWui0Z438tP8K5XHuLoXzzO0OGP4GPvIEv/BNE6acOwdDUiG1my7JKOITxNafKOl9c48ud/g/a9i3r9DtLGnxLFJ9AI6jXQsJhS+WMs3bOqGZI0UcX2JuMZt8xPbY+jzSvj1BCpC1ITpCZyZh+EGlBDfHoJshN959SLPSFPPHZncOJdVgwucjzKQsfAb0isp+fQMHBMVWkvC+wO4tILEkNhMyzGbf2djjKvNfdoUz+104RMYbyGTX64kiTRRqTmkp9H03c/V2+gavWF3SLH/ou4v8fTsd8F+WNURmj6porxRFDPUhC9JoR0DWitKf
w0YwUACFNfpM30wsyzurTJSs1XiLur4QvcPPY2ppFL9lkaEXUMiG97kRwZZw5FzwV6Ef8ndxsZZ+aOmmW94K+47JYl5YGBwWU4a1pFkQ1RnkD0ADC+sJ1GpeVZyJYmSaK4r83PurjOKlia7g2hdPA0pr5F55nGQTbVV/cKyCCWKY0xQ/RWouiPCD2fm/iJ/yj/lN6PWx9uSqMGGl/B96KVM4fYOJTHtPOyC9uMw2v2kcUfAdtCFEd5LCSXIvqOZsjYVPrb7J53Lh3lhVXbKcfvx+obCeEQGnImKXI5pu/gwgMxietEFRumMsJTqN2ipDmDo+ZCzdXqLlZ3L75ltm3qAjXwus2kBHSi7xxGII0/jrnEGkkeqNuyXTVvXJd6o6EdCysAVKuYIB0YqBgaVCZyiVlh5uq92Sn3mA06BsmfEZqmgSStVF44uGHDi19qjI1+yN3vEuFA4T0eH89xVKLY1K91UqWI5/TCwTPZMz89/cW3FDpsXso8br2AJrhL0jRk07zkmpCxcRW6SamBO+UU9uCyVzQycTcH3LNYkRXn/yCdLxGXiJb6MENENEsbdXWextLv5jZJDMHcWCoNX/zEE6v6EFbiha3U3VTDCGL/dGYLuZ3FszLOYPQNSGFL1qBEpQFgGSJLO390MSGKgNzuV4oW4375zI4agU5l9NvV96MrhsjsHiwbHY+Qc7uVe3f1zZgt01L/jRUHRvDz/gRr3IOEEUQhrZcpla9mNFsGc/AEpSmIWj2gGJh625uh+aKcZdudVHBcT9MGOUfPcLWKVSpphER9orlHeFzykkLddclVhZz28ZqGDr2lkk3jUUy0Urkwdk72NVlqy/nh6m41F6nLhBqJZ4hxlTLMvN8s0KJzbkX05hxVKsnw0MJlWwaODcVBo4+5Wb9IW9FVHHHWgMduTRUcaIsBPRXG59llvOakC3VEwFrsMZckJY4yZszbdbfzRbStXsr4CGnJ5TBBtnor9lFxjBAPYukCsNeqKJm4iUQK2d5K5ej+rdsu2Ccan3DL+t1dRWxQRFaMjIwckuCL3VtXwtyPoZxe9kzz/Jrc8UxtkPfuvRT8NWSN3K5kthfP9mAetdJrOw3tA2i4FKxMo94P0ev4+D99ie+fGMkXy/r26dHRYq5P80f7dhNK64qCFSuQsJIkyVMaT/UCuf76lOQRWPgzX6As/waXDQgpqsvRxjIS2TdRxT6ddMKNG4tDPBWRmkNNoO5IzZGaS/E5jTbqNReti4fTu4RzJEHmapSWaa7SKC0lU3Nj4xFROdQ+Ty0Hji2uYx09dEkCjdLIgIsvNjOgXfoUHDuheYXjlq3wNJhS59PPOM3whNPs/9Q4VQBztZqkg0d3W+S6WzU6RFtgeZ6P7gAxPiGb5bTombCvkJfTcx8SpD6+zEfBdTVEajbVeVOcSxF9wEpErKm+53lNggjHwWrm2T+4pXVENF9SRUxF+qGxGPe1ZllhRwSQJ5MkMXU9KKJDCCaCOl520VeGYKtVS3mWkGOiQS2r71Orn17udfPkzxYRNxKXI/KMpRouG3n+lb+Enn8bPaXpP0HuIpSeyV9KppTii+ntWwnbjLMNoHbJFwVzz71sQeaf4ohJqBiMHaFeP4Bqmj/O3otob37Krb9nhsjNTWuKmEEuR07Rfjrxu6nPjpF7XSU79xLkxLp/UKmgSZKk69dvWolk42EW446/nA8edOGo5OEhxc+Cu6mIDqpwCbBzciB1ksD6DaxRiRabp4wvN5BXuUnF0n2GRHqGrOicmmDPoP9OZdSa8zxRwk40l9qzMnh5siMwd1n5CYR+0dzHebr0tDQANHegaOruB1TCCcda0qKTB4wrVyVJ8qVOmkClcm+fua+T9vvZx42jB8BHXMMeNfYDa8wzlTy4e74RLhVhZV60Q3C31Mi+AZAGORwsPYSzGjBRAdFV7vYDFaWotI5IhEj69Wr1fSfOrIiwnNnNkiTKsn/fT+Pk68kaoAFE9yAndwDw/JJa5w
ML5jfwjv301J9Gw7p8jRlbidvFcN0cxDrnWWb5v2ago62c71nWg4t+2vAf1HKeZNY+SR1Y48RMjqntAm2MXyH1fGU6y4qU2BwtBaa1TSe1WxARyzNWbAYJshN9p4/JD0ClklCpJLr1Eb9LVPvNsjw+zwsmaKkiPEua7XMNI7j0uuQ5u7ntSGNxfxvwp8UImveLwoVRaiOvV2WBu1vTGC+CqZaGU8+eELefZ8JbY/bnNc0V4mwtKGf2LCVarS5a7mK3O/5MpXL/1mr1jmm88HDllQN9mcstkqYrEJ9EsIDotwS5zJuhQPlmbb+zZsbE2VEJqWm6C5FDIEvHexHUrAGU3vjwwwvur1SS/fnSxq2eTLhRJVpheXC7FhRansrOznovwyHzuro+jdvaptfZ3frEea2jA4ghqoAcDsiTAFHmQ+bZXtFSxTyFzFXUVpl5LJKNu/TMGmTIGdZXPxsv9kZo7LuEnvJqxk6ChgjsSYLlDq0Z6ywmyvFVIyx69h+Ie9/C2EvzcesnlK/ip1Z8gUsPjHB62eQth9GSvQO4ryJLc6btNkw9O3L65/eDXlwGsbQo2yajICMwOdVwfIXA5k0jrfY0T4umpRTSmqOWhzugrcfcaQmUxcbJAmZ72y0X1CSawYvdib7ZY+3aJB4cXHS1iS/1NN3nrieiKMRbt/pKUb9DVG81y3TcvuS5ucXhYObp0yX1Iy6lRxG/Ec8lcgTFUtMQ3bi+cu//1hjr+X96eg4VMWoLyyYnbw3S83bL0phchcpVJtHIspMHAjxs8PNeLHrkM7C8TpjgZsgdSLTbICevHHk6aB07OyRJYus33Ls60vPuzGxsmVntmfWVz2zH7B9V2Z8GhqJMLAvSGzJfaeLvwv1N7lY4UYq5QcnS2qiKPezwC+30nO55tJ+/4+oi+ywd+6ZoWGd56FbO7NxNlLUhkg/Coru3bHnhcJKQVqsXxnnNR/+ISRp5U5b1XMbVEO03sr+76crjI7t2ra0NHRv6Bwi34pTzQPJ0PrABsd7WlZKdwJE8E+aukfXXf/op1WjY0rQ/L4jhqwVZbtbIox60hFu2uyRHnzytk++E5vM203KsTSSee5Nl6XqcBagaGp2g0djG80PD8MDMYyWJkWxULNpO/eRhRPoRNczWMy9dyrZte1j0zkkHzeKhXvJ8GdffptSzgEbNiGIwHuPFVUdy73el5c2eaclZqkr2skvp6bmYRj1Pa/TsAMYhEtepSy6cUT1IrUsza2Py8ZM16RnahhgK0YTg3kk4i3qQuXTzU72m4VfE7TcJ0Ql1GTUhQhlAQtkss0lDGGAisr3k8QGIR8xH/0IlrMN1QdOp4DmTBJcPx3Hj1akt3HbttYxmLlep6O2epUvBtWlbaxaeyCz9XP1kOtRT1gjBcLS9HuRsMZVlZMW8hDNijNB8lGdPS5IkumULkWSsymx00N0jCdGlAusMUhOGg8mwo6mYlc19UDXEmRW1KNqcHqKKW/b5RoPDUezllg9b8NNw0sCkF4N7/gIJ/ldCuFHUV7lleYiNoG5ZJITbHR+8YHDwi1+r+rGgtVWWydtEdY2bjWsADiaqdcuyh+aVSzvzEKPd6QvbFz0j6BHwFYVwoUBuG3Mxx8zddo6OlIab8/a17faMWXZCkCKHXGKYGHcqKtXqI8k06uypZ2EqNkIyUzTARqCqLBlcisZXktbLedSF7CewO2dC15/aX5CIkTxygMVLHyOetzZP99OVqFxBkuxm0+3ka08V8OKZvo4iYHsjucpaqM6Lvr0Az94KelcRagRuJzC7H6rK4LLL0W/3k922k7suOjI1pKjoKxHj3r2XEOR3SRurwYxo3ijpS9tYYIcY6iRBTodpHDgaxtLM4xqSV0M5mzx4AcMhUzk9G+RpPC31uBzHKQs89zAOoDIghSrtZHnwdrPb3GZlInoos/pfBV48AZDFi/5eG/yChNJveFYvN1W+/CR8vov8RkDfCpK6WX9epqrlnRUXE1V1S7
8QGPt8Z4/zGbpG5Ix9lB26On0MDv5Ur6Gvxr0XUMtSy/3FROLaj0o/4uNOmMzSybdWKqqK2ZMe/F5ixnn9mUnAHc6jAcdeHHx84cKhTaLh4+QRNCYi6oJC1gv6JhWtAKPu3gfEZqZ5EXsHxDSUEOdxs9q9Dz74nuMA1eojkbL7oIscQFg5ZXwRUwnHzPyfb7nl+RrkNuqr3pDuK9X0gGi0sjBUNZlwbj7FasC2fP8zWXvHARRLI5yL2LT3ZngO/Fe1df81K+Y3289C9DLDWIPIxUVoD2SN3YTy1NUBZ0Jyfcpn9j6IZe/GHUKIsfQm4E8mO+EQYsT72D04zIW/njK6OyJ6Wxn2LiCTdZTC67HoTbgtAIworuPp54nqW7lwRR+mb0PCrdT9m2za8yD+rd2kpUMMMMxL56WE28qk+xZz395LifRdIFdjmVEqK86TpKUt7H5FSlIwtdmZqjo/sHWLLcJriMbkthhMMHVTkyh32bppvq1gPqKFimJKsX+zPwXIZggU74RZPjdJkthrX7u5TMziwnsMnqdw5fbrdkkjV/5D6BnNvPG5gD7ctpzB0A03fOIPGo3yAo3i2y2tNyWaXDV3U3fpQ9wQz+v3FZKPoIiqmttXAvLhavX7w5XKwl6bUUL/yUA+v5+YX4rDxS5mZm0vnPwFpLl0MEntzf/Ns0tCrJ6lzxD8w4svGHzm8IkXFnQebXbocGtYCKndfvvu9IknBv7kpZPyStHwW+T1N1NBiqfBcJMyeWFammuku+dZPSGU1PG9Da+//xtfP76nybSq1W122WVLDp/Xlz4jGq5xyyLaXroI6iIHVdnfnDOAN1yVnPhadeGOoGFDXui3FWCV2yzZL954uv2Y00I+x0paLxNKt1OK3zTrl3CWlUkb/eBQikcYe+kJDi87cdqLcIlvJ02PoNFg7qxhPZv2DY4vP49ofhvI5YSwGWSYWqNOiCKM+USlBZRKg2SNATzLmWpcTmmMfYGGf5yja0+waM9yovJrEF+KyFuJz9uAZ8fRxnFG/BiM1ElLfYQwSFxaSv1kwWR7FPchxkY/xNE1+5vnNlHgG1dX2yeu2e7MhcolTOCkZz7q4qPuPiomNXcZFfOamNda2/Lf3bzmxfb8t3w/cR91l9FsxjjITvTNHqVSvdexQciZFS4mxSdPe5O0CKlINcRDDat/eNEFA/8lL4TQujGvuebEIZEjv25p/ZOi4VirTmOzVqNT2NVM0BTHVCOTEB9yz/6vQPquavU9z7Q7AYq0RcPF2p+pjkGzraMoDMtN+ovtgbT15kvHf5dgrRTCTjjJeICqF7RIUQl4Fo9DVupRkFS1NKIarIitMRFJBTWcPG3O1fJ2HjKjoZRq6DnmWf2PLbLbtq8/+vBFF+1uuw/yfvL9i3Oc1eOpNK9JM60xyyIFuPLK4yPnzcs+hGXvFaI9QeNiPClSIL2Nkef0qqppKJ2wrLElqzdu+Ub1xR2txcEAEnvqqedruD2hWjohzb5a18c8G9sD9XEJrOn1D/A1MwMN7fsX9gd/cmysMTQ5rXLWEPL7BAHL+qifXEy9NrtPkzlqgLQxhPmjpx2ek7hy56uOoeEhQpQ7Yks9g3h6I9Rb9ImmqPQTQoWo52ZKpbcQ4lsJ0QbMLqZRGwSUuHcUZD+1l95Pze7k6CtypqZaJkQpUZybIhq1ftJ0JSJXEKI3EUpvRsONWHYJjbEBRCGeN4LZwzTGfpGjax5vJ7tDPcjJjHBm8axu5BWfFdP8T4H266gdtnVoN3OwZ7JBdqLvtKSvKBL0sKiWTaQPtzJ54QkDqSMyjPsQlu0Usb94tPrbDwM8MMkWXTwQtUrl/g+kfvKL6nabhJ5LgWW49UlegFVB6yI6jNgRS9OnTep/dnxo0WO33747bYZqnH9+ZN//QXZYNX7aMFQL35UEGo2TB0qlUsfsjgaMlDXeIRN0VDFERyRNR4AR1Z4draI2CrghOuI6Ntxxek6GNJSj/aj0mQYTXB1Mpa
Sucqjt3Dvi8eoLB6+5ZvBOVasgvFajaK0QBtyZD152L7SWfC2WuiDH3bMhz+o7UR5UOfbQhmuxR5PEEhK9+sYoVQ0HBN1pmk2gJ5NakW43MaQqSUA0OhZC/DRCLG03mkjpsPjJ0eYSq0mSjFSrfLbuCx8LJreFKGxwD0vzXG0rjpVUJIwAx9zGnvEs+++qjYe2P/q+E52X+YVqlR0i4fEQlZY1tzuYalxv1EYeqX69FarTCpy/d6e7PR6intjVinPNXyBpdvJrPT3DwzOVmpsWlg0T9T4DVj4jI5ijBUNTRr/3GPN69p7u2i7jCPwVIaxFepSe82Cs9mpMHqdU3oPQh3kZiPHm85NnF0GooTJKo3GcNN2PNZ5ArMp7Xr13Qmrh86v3snTPHWR6IyLXEc9bBT6AWR9mEZiimiLRKBKOU39pH7XRv0PCF3jPq4YmO67yJ+uze2+g1LuZdGw5WTadwp3r6I3aX/Kq//W2ZFvFkkTs4986uQLxN6vPQV5b4eixzKvvW3teHmN1775V9ER/i9uaYvW0Dge6EfVAlj3N83922UwXr1K5v5yFk6s9s+UqMmDIAnWPwVLxMOyeHVHVg8C+SuXo6GzVmZtu+uT8kZFohUS+SmCxYX3iquJ+3NWPqLf6hElMJkn0tV/tX1YqlQbaOWFQVxdGouzY/k6LTV150yfnxyO6KgstVScGsiAWsrGDJ08Gi+Ppf69W33dicp+33bYlfv740Apx+jJrHRfU1cZKx77xjTtPmQPcZBqVyr19WQjLQ9YYNNEBy7yfQF4d3RkVYVjdh0APQe+havWOGsWSuW3ZNhEsXJGpz59MTzAZrlbv2teJhqtv3DQY123p1DeLpmPn6/6nvnjnuFzelOB27VobHTl+fJVYusKdpYL3g0YOI2I+BHJo3ryePQ8++JvHTzUHt922JT569IWVmUpvO90A3jN28B8e/A8d+kj06spPrw1ZiJvX7FTXa1b4410D1MMymqnFTWGoUXzP1G7/PxJljCF+75WHzogOgHt39SHzVhIKPpPKML3hEA1bTqO+gCjqwzxGPcI9ArW8iogWoTc+hDeGOLo2v36d1PymY2fZoX7Sl1biuhjxAdA+3CPUR3E5TqZH0Jf28Z6fG5qO3JzbbNqzgZ6+zaS1FTmX7Yj8DdKo/w090duS766oJ4nYJ58bXeaZ3+yEGMfOyktjBqpIJtX3ru3J04U2P7sGjf8WfNW0DNLdKPWAZzt41yt+YeoOE9G+/nG+ZOtLOjT0Xbv9dtL2dZFP19bTYgxJBBcW8/jdZimufK3safucSXWa/phKBW0vedUsk9XcNt3veYzf6fU78zEdeimqgrevTz15/NYa3zP1e/r05BELE49p+3WasI8Wc06SRHftIjp69EJtv4ZF37Ocg6nX9NTzOPGY2V2vU5Exi3VgZoWqwjY7Y+lxCj3NcJxpajlOe9wM+0zYv2CUrf4Vqkwc8+4ZUxJzbrP52Wso9W6mMbYan4FBaqRY+ijiv8Tzq4+TiG1+1hec9Nobxa0X1bP0oBpmmhJk+/f//P88kCSJsenZKwjRF4EFZOn0EmRpHmTpdt698vrZj9fK8ICm6jIXC4ZN7vfHbRGyHxXaM2pgbub63GFittWPN61dzAKniovsACFxZelzl1Cat5n62OXj3qGOfhkB1b1kY7/MC6/eTSJ27y7vS8NL17iEQU5Zx/HUUPfR1OZVhx/gRJKIsXnv2xG9H/N4gkNmAn1uxL2QNv6ad6+8bVYBsF100UUXp0CzWMUwaTact8fTuXJMKExrRqmnHymtgbtJ3PXoEDVTjoh7TfC647Uz/Yh4aipDw0O0ORDCL6AhHndZji9X10afA5aBUtjHZrn+bhdddNHFDMgZZNw4QTZ2pChZNFHymqzSZul84Cou/PU4AZLrJY0bHBHXE47XBK1LpnWh7XPKttcFr5tRH3Pbz7a7cxru/04ZYUPhYe6cqSPFtiyFzJ6d+ynqoosu/rUiZ5
CH1p7A2UUUj+YS2jRhMyJKlsbEPeupp2uboVBHh847JioH1b2mntZUqam3fU7ZDjXB63h04OSreo/AxrwOx8n6G9FwMWld8WncP05RXUSOIeSOnblcg7aLLrr4V4vWUonC0+CdY+Pa4Q5ZuhbRm1m4u5ck0eR6SV+M4wOWlo5khLq518y9ZqH4tP/f3m7bniHHYi/tTUQsgTzfslS6sxhzyuJTEyGgYTcuh7r2xy666GKu0JLKgj5NOnaIEGkH70wbXHEvA/8WDVfkbnTX5OVSmzcW71NPjyleV3wio/S2Txtz1NTrkqbH5WR939G1jJK4suSpMpK9EwmvIa3TvnznFIgYuGHZDsbsBFw3RyENXXTRxb92FG5vMf7XoSNktpWoB5gpk4XcIQIr///27ifEruoO4Pj3d869972ZvsQYnTCRYEIYUpmFRBoGXdVAd13ZVpe1QWiKWVYLUkrvUIrYLooUq6YuFARtCy5aKaWbDLRKrS66KLY0dkwlZpKZMB3j+ObNfef+jov73sub/2/GSSPl94FhOMx973Bn8eOce3/n98P5H7L/vapgZR7d6RPS/O++xrRGuaROm1LGIJIUErQQ6fsJWlR/06IUuVxvNqY/Or7vWt7dGWvjXlz2CGW7AVvkcImAS66i5RvMjy2Sn7zpLWONMf8fVi4Vf/HPu3H+LYQM7ZSFiquu7tWHFCWtKaF4lVA8ztzs1W4CZh6jOzhDPSx/spdm0mg5XHSFYxnqaaaFoknQlk+GFubGaeYiSn4ugfuVQ++fILpniXo3ZTtZVeVj1ePRCN4r4v9AaJ3hyl0fbPsAvTHGbGDtXvr5f7+C9w91muC4zXfbUcnqBWX7t8TiKW6Nf+fd8dAfpPJzMeEIyUhzLoER5marPtj5SQnXM+MnYeTBYZyfIKs/g8a7KNsbTLpq/trwAq3mE8wee2GrrHhjjNmO6+Gv+3Lj7L++giQvEXWUUjcPkFW2tuLTgJbvoPpL2vIa82OLOZOdjhAb5CT2H/85cP5OvDyE84+AHKVsb/0cMaIkCSBTEB7mw7FLtno0xuymleEvzx2HH95LO/wY5Nuods4vbkkRgbQ2S2vpjzh+Ra35JqfuWVj3HGg3kD3z/ii++Bo++zqRE8Sy0TvJM8iczjtUH+Ty2GsrvtcYY3bB2kiUR8fBfxwn3fNzQjGBbljdp09nJQmQZAqySFieBvkLTt6mHS+RyiKxdJRxP94fBb5EZILa0CHay/XqxU/cOjjG7vPPuqLlr/mweQpWbuuNMWY3rB8gc1GeO/8NstrPCMVoFSQHLNsdY7Wa9KnDewgBNFR9dKvVaB2fgnMQ2lAG3TSNZ+0EikuA+FdieYqZV3Zem84YYzax/vY3jw75wu9pffIsiEOcDlyUVsQRoyMUyvKSom065wHrIBkxQnsZlpd08ODYPd0TOw165AKqP2UmTG/jXo0xZls2Xhbm0XHLhb0Mhadx8k1Uldh5ntjrM9qp5r3huG+K6+lBdBqUDPD5vjFU5eLTbJ6y/AHt1svMjTdta22MuVE2Xr3lonx05Bqe76O8iEsCzmkv6PWauMsm41U5jL1CE4N+vvsVUq0c01qL0H6C1L3I3G8sOBpjbqitHyzm0THy7gF88jhJ7Vto2IeuetPcW+XJjRgr3iuRi8T4JKfHzu74bo0xZhu2fv6XizI3PovwJGUxSZJdxGdVWbQYtfNWmV7zrN0aRxSRquct7k20/C4Mv3xD/xvGGNNnsLfHuSgzx+bJ0rOE9hkiUyRZwCeuU0OyIn1b452Pq+CbZHRSh14gLJ1hf/t1Zg62dnSXxhizA37gK6cmI/fcqnz8wHka8+dQvQJ6lNrQHlQFYlldGGVNy4beKrFroz7bUqXwJGmLMryDxu8RWs8xO36JuRG1Z47GmP+lwQMkwNRU5H4RFh+4xmO3vcFXH/0dZXsJn9ZIa/Wqx7QH5yIinf1ylPWDo4A4xbkqenrfojZ0haL1JzT8BIk/4jvH3m
biQCA/qUxNbqf5tTHGfGYDZn+vo9eshxRnXwAAALtJREFU+8uOO0aPojIBch/p8HGkPEQobyfGYbzXNdNEdagqIk18chHVC4Tib0TewvNnTn/xam8OSwI3xtwkOw+QcD2Adc9b73+vQcYhXLyDUu9E/GHSZBTxDaJmAGhs4uICoZyB+AGlTEOcxV+7zMzrrV4fW2OMuck+W4Bcrb8Rd34u4fCRhI9Dxp7EsdC5xgfFF8rwcOA/RwK5hF4tSAuMxpjPkd0NkP16W3BYWfJssjPu/LagaIz5nPoUBSp4D1AF9yMAAAAASUVORK5CYII=)"]},{"cell_type":"markdown","metadata":{"id":"_8dMBi8UNtg1"},"source":["[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/langtest/blob/main/demo/tutorials/llm_notebooks/Visual_QA.ipynb)"]},{"cell_type":"markdown","metadata":{"id":"_EzC6SKhjdk7"},"source":["**LangTest** is an open-source python library designed to help developers deliver safe and effective Natural Language Processing (NLP) models. Whether you are using **John Snow Labs, Hugging Face, Spacy** models or **OpenAI, Cohere, AI21, Hugging Face Inference API and Azure-OpenAI** based LLMs, it has got you covered. You can test any Named Entity Recognition (NER), Text Classification, fill-mask, Translation model using the library. We also support testing LLMS for Question-Answering, Visual question-answering, Summarization and text-generation tasks on benchmark datasets. The library supports 60+ out of the box tests. For a complete list of supported test categories, please refer to the [documentation](http://langtest.org/docs/pages/docs/test_categories).\n","\n","Metrics are calculated by comparing the model's extractions in the original list of sentences against the extractions carried out in the noisy list of sentences. 
The original annotated labels are not used at any point, we are simply comparing the model against itself in a 2 settings."]},{"cell_type":"markdown","metadata":{"id":"v9Yd7KhpZOTF"},"source":["# Getting started with LangTest"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"kJ-dxTWu7bcA"},"outputs":[],"source":["!pip install langtest==2.4.0"]},{"cell_type":"markdown","metadata":{"id":"cXOI5kBFlO6w"},"source":["# Harness and its Parameters\n","\n","The Harness class is a testing class for Natural Language Processing (NLP) models. It evaluates the performance of a NLP model on a given task using test data and generates a report with test results.Harness can be imported from the LangTest library in the following way."]},{"cell_type":"code","execution_count":1,"metadata":{"executionInfo":{"elapsed":4291,"status":"ok","timestamp":1692340616139,"user":{"displayName":"Prikshit sharma","userId":"07819241395213139913"},"user_tz":-330},"id":"w1g27-uxl1AA"},"outputs":[],"source":["#Import Harness from the LangTest library\n","from langtest import Harness"]},{"cell_type":"markdown","metadata":{"id":"PXBMpFHIl7n9"},"source":["It imports the Harness class from within the module, that is designed to provide a blueprint or framework for conducting NLP testing, and that instances of the Harness class can be customized or configured for different testing scenarios or environments.\n","\n","Here is a list of the different parameters that can be passed to the Harness function:\n","\n","
\n","\n","\n","\n","| Parameter | Description |\n","| - | - |\n","| **task** | Task for which the model is to be evaluated (Visual Question Answering) |\n","| **model** | Specifies the model(s) to be evaluated. This parameter can be provided as either a dictionary or a list of dictionaries. Each dictionary should contain the following keys:
- model (mandatory): \tPipelineModel or path to a saved model or pretrained LLM pipeline/model from hub.
- hub (mandatory): Hub (library) to use in back-end for loading model from public models hub or from path
|\n","| **data** | The data to be used for evaluation. A dictionary providing flexibility and options for data sources. It should include the following keys: - data_source (mandatory): The source of the data.
- subset (optional): The subset of the data.
- feature_column (optional): The column containing the features.
- target_column (optional): The column containing the target labels.
- split (optional): The data split to be used.
- source (optional): Set to 'huggingface' when loading Hugging Face dataset.
|\n","| **config** | Configuration for the tests to be performed, specified in the form of a YAML file. |\n","\n","\n","
\n","
"]},{"cell_type":"markdown","metadata":{"id":"KLC_lBv09ZuN"},"source":["# Robustness Testing\n","\n","Model robustness can be described as the ability of a model to maintain similar levels of accuracy, precision, and recall when perturbations are made to the data it is predicting on. For example, In the case of images, the goal is to understand how modifications such as resizing, rotation, noise addition, or color adjustments affect the model's performance compared to the original images it was trained on.\n","\n","\n","**`Supported Robustness tests :`**
\n","\n","### Text\n","\n","| **Test Name** | **Short Description** |\n","|-------------------------------|----------------------------------------------------------------------------------------|\n","| **`uppercase`** | Capitalization of the text set is turned into uppercase |\n","| **`lowercase`** | Capitalization of the text set is turned into lowercase |\n","| **`titlecase`** | Capitalization of the text set is turned into title case |\n","| **`add_punctuation`** | Adds punctuation to the text set |\n","| **`strip_punctuation`** | Removes punctuation from the text set |\n","| **`add_typo`** | Introduces typographical errors into the text |\n","| **`swap_entities`** | Swaps named entities in the text |\n","| **`american_to_british`** | Converts American English spellings to British English |\n","| **`british_to_american`** | Converts British English spellings to American English |\n","| **`add_context`** | Adds additional context to the text set |\n","| **`add_contraction`** | Introduces contractions (e.g., do not → don't) |\n","| **`dyslexia_word_swap`** | Swaps words in a way that mimics dyslexic reading errors |\n","| **`number_to_word`** | Converts numbers to words in the text set (e.g., 1 → one) |\n","| **`add_ocr_typo`** | Adds optical character recognition (OCR) specific typos to the text |\n","| **`add_abbreviation`** | Replaces certain words with their abbreviations |\n","| **`add_speech_to_text_typo`** | Adds speech-to-text transcription errors |\n","| **`add_slangs`** | Introduces slang terms into the text |\n","| **`multiple_perturbations`** | Applies multiple perturbations to the text at once |\n","| **`adjective_synonym_swap`** | Swaps adjectives in the text with their synonyms |\n","| **`adjective_antonym_swap`** | Swaps adjectives in the text with their antonyms |\n","| **`strip_all_punctuation`** | Removes all punctuation from the text |\n","| **`randomize_age`** | Randomizes the age mentioned in the text |\n","| **`add_new_lines`** | Inserts 
new lines into the text set |\n","| **`add_tabs`** | Inserts tab characters into the text set |\n","\n","### Images\n","\n","| **Test Name** | **Short Description** |\n","|----------------------|--------------------------------------------------------|\n","| **`image_resize`** | Resizes the image to a different dimension |\n","| **`image_rotate`** | Rotates the image by a specified angle |\n","| **`image_blur`** | Applies a blur filter to the image |\n","| **`image_noise`** | Adds random noise to the image |\n","| **`image_contrast`** | Adjusts the contrast of the image |\n","| **`image_brightness`**| Adjusts the brightness of the image |\n","| **`image_sharpness`** | Adjusts the sharpness of the image |\n","| **`image_color`** | Adjusts the color balance of the image |\n","| **`image_flip`** | Flips the image either horizontally or vertically |\n","| **`image_crop`** | Crops a portion of the image |\n","\n","
"]},{"cell_type":"markdown","metadata":{"id":"cVIzXdGMjX47"},"source":["## Testing robustness of a pretrained LLM models\n","\n","Testing a LLM model's robustness gives us an idea on how our data may need to be modified to make the model more robust. We can use a pretrained model/pipeline or define our own custom pipeline or load a saved pre trained model to test.\n","\n","Here we are directly passing a pretrained model/pipeline from hub as the model parameter in harness and running the tests."]},{"cell_type":"markdown","metadata":{"id":"78THAZm3cRu7"},"source":["### Test Configuration\n","\n","Test configuration can be passed in the form of a YAML file as shown below or using .configure() method\n","\n","\n","**Config YAML format** :\n","```\n","tests: \n"," {\n"," \"defaults\": {\n"," \"min_pass_rate\": 0.5,\n"," },\n"," \"robustness\": {\n"," \"image_noise\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"noise_level\": 0.5\n"," }\n","\n"," },\n"," \"image_rotate\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"angle\": 45\n"," }\n"," },\n"," \"image_blur\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"radius\": 5\n"," }\n"," },\n"," \"image_resize\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"resize\": 0.5 # 0.01 to 1.0 means 1% to 100% of the original size\n"," }\n"," },\n"," }\n"," }\n"," \n","```\n","\n","If config file is not present, we can also use the **.configure()** method to manually configure the harness to perform the needed tests.\n"]},{"cell_type":"code","execution_count":2,"metadata":{},"outputs":[],"source":["import os \n","os.environ['OPENAI_API_KEY'] = \"sk-XXXXXXXX\""]},{"cell_type":"markdown","metadata":{},"source":["## Visual Question Answering (VQA)\n","\n","This notebook demonstrates how to perform a Visual Question Answering (VQA) using the `PIL` library to load images and a harness for running the task. 
The model being used is `gpt-4o-mini` from the OpenAI hub, and the data comes from the MMMU dataset, specifically the `Clinical_Medicine` subset."]},{"cell_type":"code","execution_count":3,"metadata":{},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"c274bf01644a432fb0e254fd1e8ebb75","version_major":2,"version_minor":0},"text/plain":["Resolving data files: 0%| | 0/60 [00:00, ?it/s]"]},"metadata":{},"output_type":"display_data"},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"3cc1882c9281421f8b7f42f54a3999ce","version_major":2,"version_minor":0},"text/plain":["Resolving data files: 0%| | 0/32 [00:00, ?it/s]"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Test Configuration : \n"," {}\n"]}],"source":["harness = Harness(\n"," task=\"visualqa\",\n"," model={\n"," \"model\": \"gpt-4o-mini\",\n"," \"hub\": \"openai\"\n"," },\n"," data={\"data_source\": 'MMMU/MMMU',\n"," \"subset\": \"Clinical_Medicine\",\n"," # \"feature_column\": \"question\",\n"," # \"target_column\": 'answer',\n"," \"split\": \"dev\",\n"," \"source\": \"huggingface\"\n"," },\n"," config={}\n",")"]},{"cell_type":"markdown","metadata":{"id":"jGEN7Q0Ric8H"},"source":["We can use the .configure() method to manually define our test configuration for the robustness tests."]},{"cell_type":"code","execution_count":4,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":91,"status":"ok","timestamp":1692340473373,"user":{"displayName":"Prikshit sharma","userId":"07819241395213139913"},"user_tz":-330},"id":"C08dW5tue_6d","outputId":"c12433af-296e-4e9b-d2e2-cdd68f5426ea"},"outputs":[{"data":{"text/plain":["{'tests': {'defaults': {'min_pass_rate': 0.5},\n"," 'robustness': {'image_noise': {'min_pass_rate': 0.5,\n"," 'parameters': {'noise_level': 0.5}},\n"," 'image_rotate': {'min_pass_rate': 0.5, 'parameters': {'angle': 55}},\n"," 'image_blur': {'min_pass_rate': 0.5, 'parameters': {'radius': 
5}},\n"," 'image_resize': {'min_pass_rate': 0.5, 'parameters': {'resize': 0.5}}}}}"]},"execution_count":4,"metadata":{},"output_type":"execute_result"}],"source":["harness.configure({\n"," \"tests\": {\n"," \"defaults\": {\n"," \"min_pass_rate\": 0.5,\n"," },\n"," \"robustness\": {\n"," \"image_noise\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"noise_level\": 0.5\n"," }\n","\n"," },\n"," \"image_rotate\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"angle\": 55\n"," }\n"," },\n"," \"image_blur\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"radius\": 5\n"," }\n"," },\n"," \"image_resize\": {\n"," \"min_pass_rate\": 0.5,\n"," \"parameters\": {\n"," \"resize\": 0.5 # 0.01 to 1.0 means 1% to 100% of the original size\n"," }\n"," },\n"," }\n"," }\n","})"]},{"cell_type":"markdown","metadata":{"id":"FLLzeE_Pix2W"},"source":["Here we have configured the harness to perform image robustness tests (image_blur, image_resize, image_rotate, and image_noise) and defined the minimum pass rate for each test."]},{"cell_type":"markdown","metadata":{},"source":["To ensure we work with a smaller subset of data, we'll limit the dataset to the first 50 entries. This is useful for faster prototyping and testing without needing to process the entire dataset.\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["harness.data = harness.data[:50]"]},{"cell_type":"markdown","metadata":{},"source":["In this section, we will reset the test cases in the `Harness` object by setting `harness._testcases` to `None`. 
This can be useful if you want to clear any previously loaded test cases or start fresh without any predefined cases.\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["harness._testcases = None"]},{"cell_type":"markdown","metadata":{"id":"MomLlmTwjpzU"},"source":["\n","### Generating the test cases.\n","\n","\n"]},{"cell_type":"code","execution_count":5,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":23034,"status":"ok","timestamp":1692340496325,"user":{"displayName":"Prikshit sharma","userId":"07819241395213139913"},"user_tz":-330},"id":"njyA7h_tfMVo","outputId":"481382ae-630d-4c62-d6d8-c8108982df89"},"outputs":[{"name":"stderr","output_type":"stream","text":["Generating testcases...: 100%|██████████| 1/1 [00:00, ?it/s]\n"]},{"data":{"text/plain":[]},"execution_count":5,"metadata":{},"output_type":"execute_result"}],"source":["harness.generate()"]},{"cell_type":"markdown","metadata":{"id":"C_qyYdl8FYoD"},"source":["harness.generate() method automatically generates the test cases (based on the provided configuration)"]},{"cell_type":"markdown","metadata":{},"source":["This code snippet will display an HTML table based on the DataFrame returned by `harness.testcases()`. The `escape=False` parameter allows HTML content within the DataFrame to be rendered without escaping special characters."]},{"cell_type":"code","execution_count":11,"metadata":{},"outputs":[{"data":{"text/html":["\n"," \n"," \n"," | \n"," category | \n"," test_type | \n"," original_image | \n"," perturbed_image | \n"," question | \n"," options | \n","
\n"," \n"," \n"," \n"," 3 | \n"," robustness | \n"," image_noise | \n"," | \n"," | \n"," What person's name is associated with the fracture shown below? | \n"," A. Monteggia\\nB. Bennett\\nC. Jones\\nD. Smith | \n","
\n"," \n"," 15 | \n"," robustness | \n"," image_resize | \n"," | \n"," | \n"," Identify the following rhythm: | \n"," A. Sinus Rhythm with PAC's\\nB. Junctional Rhythm\\nC. 2nd Degree AV Block, Type I\\nD. 3rd Degree AV Block\\nE. Normal Sinus Rhythm with PVC's\\nF. Idioventricular Rhythm | \n","
\n"," \n"," 6 | \n"," robustness | \n"," image_rotate | \n"," | \n"," | \n"," A 56-year-old woman is undergoing chemotherapy for treatment of breast carcinoma. The gross appearance of her skin shown here is most typical for which of the following conditions? | \n"," A. Thrombocytopenia\\nB. Gangrene\\nC. Congestive heart failure\\nD. Metastatic breast carcinoma | \n","
\n"," \n"," 18 | \n"," robustness | \n"," image_resize | \n"," | \n"," | \n"," What person's name is associated with the fracture shown below? | \n"," A. Monteggia\\nB. Bennett\\nC. Jones\\nD. Smith | \n","
\n"," \n"," 17 | \n"," robustness | \n"," image_resize | \n"," | \n"," | \n"," Based on , what's the most likely diagnosis? | \n"," A. first degree atrioventricular block\\nB. third degree atrioventricular block\\nC. Second degree type II atrioventricular block\\nD. atrial flutter | \n","
\n"," \n","
"],"text/plain":[""]},"metadata":{},"output_type":"display_data"}],"source":["from IPython.display import display, HTML\n","\n","\n","df = harness.testcases()\n","html=df.sample(5).to_html(escape=False)\n","\n","display(HTML(html))"]},{"cell_type":"markdown","metadata":{"id":"fRyNPRBokXNZ"},"source":["### Running the tests."]},{"cell_type":"code","execution_count":12,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":68268,"status":"ok","timestamp":1692340564519,"user":{"displayName":"Prikshit sharma","userId":"07819241395213139913"},"user_tz":-330},"id":"3kUPTsNvjkgr","outputId":"4c4815e4-4cab-4dbf-99ba-1a231656f1e3"},"outputs":[{"name":"stderr","output_type":"stream","text":["Running testcases... : 100%|██████████| 20/20 [00:44<00:00, 2.21s/it]\n"]},{"data":{"text/plain":[]},"execution_count":12,"metadata":{},"output_type":"execute_result"}],"source":["harness.run()"]},{"cell_type":"code","execution_count":13,"metadata":{},"outputs":[{"data":{"text/html":["\n"," \n"," \n"," | \n"," category | \n"," test_type | \n"," original_image | \n"," perturbed_image | \n"," question | \n"," options | \n"," expected_result | \n"," actual_result | \n"," pass | \n","
\n"," \n"," \n"," \n"," 5 | \n"," robustness | \n"," image_rotate | \n"," | \n"," | \n"," Identify the following rhythm: | \n"," A. Sinus Rhythm with PAC's\\nB. Junctional Rhythm\\nC. 2nd Degree AV Block, Type I\\nD. 3rd Degree AV Block\\nE. Normal Sinus Rhythm with PVC's\\nF. Idioventricular Rhythm | \n"," Answer: UnRecognizable. | \n"," Answer: UnRecognizable. | \n"," True | \n","
\n"," \n"," 4 | \n"," robustness | \n"," image_noise | \n"," | \n"," | \n"," The best diagnosis for the appendix is: | \n"," A. simple appendicitis\\nB. appendix abscess\\nC. normal appendix\\nD. cellulite appendicitis | \n"," Answer: UnRecognizable. | \n"," I'm unable to recognize the content of the image. Thus, I cannot determine the correct diagnosis for the appendix. \\n\\nAnswer: UnRecognizable. | \n"," False | \n","
\n"," \n"," 7 | \n"," robustness | \n"," image_rotate | \n"," | \n"," | \n"," Based on , what's the most likely diagnosis? | \n"," A. first degree atrioventricular block\\nB. third degree atrioventricular block\\nC. Second degree type II atrioventricular block\\nD. atrial flutter | \n"," Answer: UnRecognizable. | \n"," Answer: UnRecognizable. | \n"," True | \n","
\n"," \n"," 9 | \n"," robustness | \n"," image_rotate | \n"," | \n"," | \n"," The best diagnosis for the appendix is: | \n"," A. simple appendicitis\\nB. appendix abscess\\nC. normal appendix\\nD. cellulite appendicitis | \n"," Answer: UnRecognizable. | \n"," Answer: A. simple appendicitis. | \n"," False | \n","
\n"," \n"," 0 | \n"," robustness | \n"," image_noise | \n"," | \n"," | \n"," Identify the following rhythm: | \n"," A. Sinus Rhythm with PAC's\\nB. Junctional Rhythm\\nC. 2nd Degree AV Block, Type I\\nD. 3rd Degree AV Block\\nE. Normal Sinus Rhythm with PVC's\\nF. Idioventricular Rhythm | \n"," Answer: UnRecognizable. | \n"," Answer: UnRecognizable. | \n"," True | \n","
\n"," \n","
"],"text/plain":[""]},"metadata":{},"output_type":"display_data"}],"source":["from IPython.display import display, HTML\n","\n","\n","df = harness.generated_results()\n","html=df.sample(5).to_html(escape=False)\n","\n","display(HTML(html))"]},{"cell_type":"markdown","metadata":{},"source":["Called after harness.generate() and is to used to run all the tests. Returns a pass/fail flag for each test."]},{"cell_type":"markdown","metadata":{"id":"106TE41ffw43"},"source":["This method returns the generated results in the form of a pandas dataframe, which provides a convenient and easy-to-use format for working with the test results. You can use this method to quickly identify the test cases that failed and to determine where fixes are needed."]},{"cell_type":"markdown","metadata":{"id":"_0gnozMlkoF0"},"source":["### Report of the tests"]},{"cell_type":"code","execution_count":15,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":112},"executionInfo":{"elapsed":22,"status":"ok","timestamp":1692340564522,"user":{"displayName":"Prikshit sharma","userId":"07819241395213139913"},"user_tz":-330},"id":"YKFvMs0RGHO7","outputId":"3a0ed33b-aa59-4e98-86d0-8d407391b0e4"},"outputs":[{"data":{"text/html":["\n","\n","
\n"," \n"," \n"," | \n"," category | \n"," test_type | \n"," fail_count | \n"," pass_count | \n"," pass_rate | \n"," minimum_pass_rate | \n"," pass | \n","
\n"," \n"," \n"," \n"," 0 | \n"," robustness | \n"," image_noise | \n"," 3 | \n"," 2 | \n"," 40% | \n"," 50% | \n"," False | \n","
\n"," \n"," 1 | \n"," robustness | \n"," image_rotate | \n"," 2 | \n"," 3 | \n"," 60% | \n"," 50% | \n"," True | \n","
\n"," \n"," 2 | \n"," robustness | \n"," image_blur | \n"," 2 | \n"," 3 | \n"," 60% | \n"," 50% | \n"," True | \n","
\n"," \n"," 3 | \n"," robustness | \n"," image_resize | \n"," 2 | \n"," 3 | \n"," 60% | \n"," 50% | \n"," True | \n","
\n"," \n","
\n","
"],"text/plain":[" category test_type fail_count pass_count pass_rate \\\n","0 robustness image_noise 3 2 40% \n","1 robustness image_rotate 2 3 60% \n","2 robustness image_blur 2 3 60% \n","3 robustness image_resize 2 3 60% \n","\n"," minimum_pass_rate pass \n","0 50% False \n","1 50% True \n","2 50% True \n","3 50% True "]},"execution_count":15,"metadata":{},"output_type":"execute_result"}],"source":["harness.report()"]},{"cell_type":"markdown","metadata":{"id":"bSP2QL6agTH_"},"source":["Called after harness.run() and it summarizes the results giving information about pass and fail counts and overall test pass/fail flag."]}],"metadata":{"accelerator":"GPU","colab":{"machine_shape":"hm","provenance":[],"toc_visible":true},"gpuClass":"standard","kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.8.10"}},"nbformat":4,"nbformat_minor":0}
diff --git a/langtest/datahandler/datasource.py b/langtest/datahandler/datasource.py
index 4de9999f4..c12a11662 100644
--- a/langtest/datahandler/datasource.py
+++ b/langtest/datahandler/datasource.py
@@ -95,6 +95,12 @@
"anti-stereotype": ["anti-stereotype"],
"unrelated": ["unrelated"],
},
+ "visualqa": {
+ "image": ["image", "image_1"],
+ "question": ["question"],
+ "options": ["options"],
+ "answer": ["answer"],
+ },
}
@@ -183,7 +189,7 @@ def __init__(self, file_path: Union[str, dict], task: TaskManager, **kwargs) ->
raise ValueError(Errors.E024)
if "data_source" not in file_path:
- raise ValueError(Errors.E025)
+ raise ValueError(Errors.E025())
self._custom_label = file_path.copy()
self._file_path = file_path.get("data_source")
self._size = None
@@ -1246,6 +1252,7 @@ class HuggingFaceDataset(BaseDataset):
"summarization",
"ner",
"question-answering",
+ "visualqa",
]
LIB_NAME = "datasets"
@@ -1709,6 +1716,7 @@ class PandasDataset(BaseDataset):
"legal",
"factuality",
"stereoset",
+ "visualqa",
]
COLUMN_NAMES = {task: COLUMN_MAPPER[task] for task in supported_tasks}
diff --git a/langtest/langtest.py b/langtest/langtest.py
index d7a1f15cd..09df1b57d 100644
--- a/langtest/langtest.py
+++ b/langtest/langtest.py
@@ -605,6 +605,7 @@ def generated_results(self) -> Optional[pd.DataFrame]:
"model_name",
"category",
"test_type",
+ "original_image",
"original",
"context",
"prompt",
@@ -613,8 +614,10 @@ def generated_results(self) -> Optional[pd.DataFrame]:
"completion",
"test_case",
"perturbed_context",
+ "perturbed_image",
"perturbed_question",
"sentence",
+ "question",
"patient_info_A",
"patient_info_B",
"case",
@@ -838,6 +841,7 @@ def testcases(self, additional_cols=False) -> pd.DataFrame:
"model_name",
"category",
"test_type",
+ "original_image",
"original",
"context",
"original_context",
@@ -863,7 +867,9 @@ def testcases(self, additional_cols=False) -> pd.DataFrame:
"correct_sentence",
"incorrect_sentence",
"perturbed_context",
+ "perturbed_image",
"perturbed_question",
+ "question",
"ground_truth",
"options",
"expected_result",
diff --git a/langtest/modelhandler/llm_modelhandler.py b/langtest/modelhandler/llm_modelhandler.py
index 35b14691c..968928e12 100644
--- a/langtest/modelhandler/llm_modelhandler.py
+++ b/langtest/modelhandler/llm_modelhandler.py
@@ -13,6 +13,7 @@
import logging
from functools import lru_cache
from langtest.utils.custom_types.helpers import HashableDict
+from langchain.chat_models.base import BaseChatModel
class PretrainedModelForQA(ModelAPI):
@@ -80,7 +81,7 @@ def load_model(cls, hub: str, path: str, *args, **kwargs) -> "PretrainedModelFor
try:
cls._update_model_parameters(hub, filtered_kwargs)
if path in (
- "gpt-4o",
+ "gpt-4o-mini",
"gpt-4",
"gpt-3.5-turbo",
"gpt-4-1106-preview",
@@ -452,3 +453,57 @@ class PretrainedModelForSycophancy(PretrainedModelForQA, ModelAPI):
"""
pass
+
+
class PretrainedModelForVisualQA(PretrainedModelForQA, ModelAPI):
    """A class representing a pretrained model for visual question answering.

    Inherits:
        PretrainedModelForQA: The base class for pretrained models.
    """

    @lru_cache(maxsize=102400)
    def predict(
        self, text: Union[str, dict], prompt: dict, images: List[Any], *args, **kwargs
    ):
        """Perform prediction using the pretrained model.

        Args:
            text (Union[str, dict]): The input text or dictionary of prompt
                variables, formatted into the prompt template.
            prompt (dict): The prompt configuration (passed to PromptTemplate).
            images (List[Any]): The list of image URLs / data URIs to attach.
                NOTE(review): because of ``lru_cache`` this must be hashable —
                presumably callers pass a tuple, not a list; confirm at call site.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            str: The chat model's response content.

        Raises:
            ValueError: If the underlying model is not a chat model, or if the
                model invocation fails (wrapped in Errors.E089).
        """
        try:
            # Multimodal (image_url) messages are only supported by chat models.
            # Bug fix: the original constructed this ValueError without raising it.
            if not isinstance(self.model, BaseChatModel):
                raise ValueError("visualQA task is only supported for chat models")

            # prepare prompt
            prompt_template = PromptTemplate(**prompt)
            from langchain_core.messages import HumanMessage

            # Build the multimodal content parts without shadowing the *images*
            # parameter (shadowing would corrupt any later use of the argument).
            image_parts = [
                {
                    "type": "image_url",
                    "image_url": {"url": image},
                }
                for image in images
            ]

            messages = HumanMessage(
                content=[
                    {"type": "text", "text": prompt_template.format(**text)},
                    *image_parts,
                ]
            )

            response = self.model.invoke([messages])
            return response.content

        except Exception as e:
            raise ValueError(Errors.E089(error_message=e))
diff --git a/langtest/modelhandler/promptguard.py b/langtest/modelhandler/promptguard.py
new file mode 100644
index 000000000..93d417f1d
--- /dev/null
+++ b/langtest/modelhandler/promptguard.py
@@ -0,0 +1,128 @@
class PromptGuard:
    """Singleton wrapper around a Prompt-Guard sequence classifier.

    Loads ``meta-llama/Prompt-Guard-86M`` (or a caller-supplied model) once and
    exposes jailbreak / indirect-injection scoring for single texts or batches.
    """

    _instance = None

    def __new__(cls, model_name: str = "meta-llama/Prompt-Guard-86M", device="cpu"):
        # Lazily create the one shared instance; the model and tokenizer are
        # loaded exactly once, on first construction.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.model_name = model_name
            cls._instance.device = device
            (
                cls._instance.model,
                cls._instance.tokenizer,
            ) = cls._instance._load_model_and_tokenizer()
        return cls._instance

    def __init__(
        self, model_name: str = "meta-llama/Prompt-Guard-86M", device="cpu"
    ) -> None:
        # Intentionally a no-op: __new__ has already populated model_name,
        # device, model and tokenizer on the singleton. The previous version
        # re-assigned hard-coded values here and re-loaded the model on every
        # construction, which both ignored the caller's arguments and defeated
        # the singleton cache.
        pass

    def _load_model_and_tokenizer(self):
        """
        Load the model and tokenizer from Hugging Face.

        Returns:
            tuple: (model moved to ``self.device``, tokenizer).
        """
        from transformers import AutoModelForSequenceClassification, AutoTokenizer

        model = AutoModelForSequenceClassification.from_pretrained(self.model_name).to(
            self.device
        )
        tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        return model, tokenizer

    def _preprocess_text(self, text):
        """
        Preprocess the input text by removing spaces to mitigate prompt injection tactics.

        Falls back to the original text if tokenization yields nothing.
        """
        cleaned_text = "".join([char for char in text if not char.isspace()])
        tokens = self.tokenizer.tokenize(cleaned_text)
        result = " ".join(
            [self.tokenizer.convert_tokens_to_string([token]) for token in tokens]
        )
        return result or text

    def _get_class_probabilities(self, texts, temperature=1.0, preprocess=True):
        """
        Internal method to get class probabilities for a single or batch of texts.

        Returns a (batch, num_classes) tensor of softmax probabilities,
        temperature-scaled before the softmax.
        """
        import torch
        from torch.nn.functional import softmax

        if preprocess:
            texts = [self._preprocess_text(text) for text in texts]

        inputs = self.tokenizer(
            texts, return_tensors="pt", padding=True, truncation=True, max_length=512
        )
        inputs = inputs.to(self.device)

        with torch.no_grad():
            logits = self.model(**inputs).logits

        probabilities = softmax(logits / temperature, dim=-1)
        return probabilities

    def get_jailbreak_score(self, text, temperature=1.0, preprocess=True):
        """
        Get jailbreak score for a single input text.

        The score is the probability of class index 2 — presumably the
        "jailbreak" label of Prompt-Guard; verify against the model card.
        """
        probabilities = self._get_class_probabilities([text], temperature, preprocess)
        return probabilities[0, 2].item()

    def get_indirect_injection_score(self, text, temperature=1.0, preprocess=True):
        """
        Get indirect injection score for a single input text.

        Sums the probabilities of class indices 1 and 2.
        """
        probabilities = self._get_class_probabilities([text], temperature, preprocess)
        return (probabilities[0, 1] + probabilities[0, 2]).item()

    def _process_text_batch(
        self, texts, score_indices, temperature=1.0, max_batch_size=16, preprocess=True
    ):
        """
        Internal method to process texts in batches and return scores.

        Args:
            texts: Input strings to score.
            score_indices: Class indices whose probabilities are summed per text.
            temperature: Softmax temperature.
            max_batch_size: Upper bound on texts per forward pass.
            preprocess: Whether to run the whitespace-stripping preprocessing.

        Returns:
            list[float]: One summed score per input text, in order.
        """
        import torch

        num_texts = len(texts)
        all_scores = torch.zeros(num_texts)

        for i in range(0, num_texts, max_batch_size):
            batch_texts = texts[i : i + max_batch_size]
            probabilities = self._get_class_probabilities(
                batch_texts, temperature, preprocess
            )
            batch_scores = probabilities[:, score_indices].sum(dim=1).cpu()

            all_scores[i : i + max_batch_size] = batch_scores

        return all_scores.tolist()

    def get_jailbreak_scores_for_texts(
        self, texts, temperature=1.0, max_batch_size=16, preprocess=True
    ):
        """
        Get jailbreak scores for a batch of texts.
        """
        return self._process_text_batch(
            texts,
            score_indices=[2],
            temperature=temperature,
            max_batch_size=max_batch_size,
            preprocess=preprocess,
        )

    def get_indirect_injection_scores_for_texts(
        self, texts, temperature=1.0, max_batch_size=16, preprocess=True
    ):
        """
        Get indirect injection scores for a batch of texts.
        """
        return self._process_text_batch(
            texts,
            score_indices=[1, 2],
            temperature=temperature,
            max_batch_size=max_batch_size,
            preprocess=preprocess,
        )
diff --git a/langtest/tasks/task.py b/langtest/tasks/task.py
index 93af99114..0e5134eae 100644
--- a/langtest/tasks/task.py
+++ b/langtest/tasks/task.py
@@ -851,3 +851,44 @@ def create_sample(
class FillMask(BaseTask):
pass
+
+
class VisualQA(BaseTask):
    """Task definition for visual question answering samples."""

    _name = "visualqa"
    _default_col = {
        "image": ["image"],
        "question": ["question"],
        "answer": ["answer"],
    }
    sample_class = samples.VisualQASample

    # NOTE(review): the first parameter is ``cls`` but the original lacked
    # @classmethod, so an unbound call (VisualQA.create_sample(row)) would have
    # misbound the row dict as ``cls``. The decorator keeps instance calls
    # working unchanged while fixing class-level calls.
    @classmethod
    def create_sample(
        cls,
        row_data: dict,
        image: str = "image_1",
        question: str = "question",
        options: str = "options",
        answer: str = "answer",
        dataset_name: str = "",
    ) -> samples.VisualQASample:
        """Create a VisualQASample from one dataset row.

        Args:
            row_data: The raw dataset row.
            image/question/options/answer: Preferred column names; the actual
                columns are auto-detected via ``column_mapping``.
            dataset_name: Name recorded on the created sample.

        Returns:
            samples.VisualQASample: The constructed sample.
        """
        keys = list(row_data.keys())

        # auto-detect the default column names from the row_data
        column_mapper = cls.column_mapping(keys, [image, question, options, answer])

        # Fetch the options value into its own variable instead of clobbering
        # the *options* column-name parameter (the original overwrote it and
        # then indexed column_mapper with the hard-coded key "options", which
        # raised KeyError for custom option column names).
        options_value = row_data.get(column_mapper.get(options, "-"), "-")

        # A stringified Python list (e.g. "['a', 'b']") is expanded into
        # lettered choices: "A. a\nB. b".
        if (
            isinstance(options_value, str)
            and len(options_value) > 3
            and options_value[0] == "["
            and options_value[-1] == "]"
        ):
            parsed = ast.literal_eval(options_value)
            options_value = "\n".join(
                f"{chr(65 + i)}. {option}" for i, option in enumerate(parsed)
            )

        return samples.VisualQASample(
            original_image=row_data[column_mapper[image]],
            question=row_data[column_mapper[question]],
            options=options_value,
            expected_result=row_data[column_mapper[answer]],
            dataset_name=dataset_name,
        )
diff --git a/langtest/transform/__init__.py b/langtest/transform/__init__.py
index 3cb59ebd6..0c4f41c9b 100644
--- a/langtest/transform/__init__.py
+++ b/langtest/transform/__init__.py
@@ -22,6 +22,8 @@
from langtest.transform.grammar import GrammarTestFactory
from langtest.transform.safety import SafetyTestFactory
+from langtest.transform import image
+
# Fixing the asyncio event loop
nest_asyncio.apply()
@@ -47,4 +49,5 @@
SycophancyTestFactory,
GrammarTestFactory,
SafetyTestFactory,
+ image,
]
diff --git a/langtest/transform/accuracy.py b/langtest/transform/accuracy.py
index eb78de8c6..1125a5e72 100644
--- a/langtest/transform/accuracy.py
+++ b/langtest/transform/accuracy.py
@@ -2,7 +2,7 @@
from collections import defaultdict
import pandas as pd
from abc import ABC, abstractmethod
-from typing import Any, Dict, List
+from typing import Any, DefaultDict, Dict, List, Type
from langtest.modelhandler.modelhandler import ModelAPI
from langtest.transform.base import ITests
@@ -15,7 +15,12 @@
)
from langtest.utils.custom_types.helpers import default_user_prompt
from langtest.errors import Errors
-from langtest.utils.util_metrics import calculate_f1_score, classification_report
+from langtest.utils.util_metrics import (
+ calculate_f1_score,
+ calculate_f1_score_multi_label,
+ classification_report,
+ classification_report_multi_label,
+)
class AccuracyTestFactory(ITests):
@@ -98,7 +103,7 @@ def transform(self) -> List[Sample]:
return all_samples
@staticmethod
- def available_tests() -> dict:
+ def available_tests() -> DefaultDict[str, Type["BaseAccuracy"]]:
"""
Get a dictionary of all available tests, with their names as keys and their corresponding classes as values.
@@ -151,6 +156,7 @@ def predict_ner(sample):
y_true = y_true.apply(lambda x: x.split("-")[-1])
elif isinstance(raw_data_copy[0], SequenceClassificationSample):
+ is_mutli_label = raw_data_copy[0].expected_results.multi_label
def predict_text_classification(sample):
prediction = model.predict(sample.original)
@@ -166,11 +172,16 @@ def predict_text_classification(sample):
y_pred = pd.Series(raw_data_copy).apply(
lambda x: [y.label for y in x.actual_results.predictions]
)
- y_true = y_true.apply(lambda x: x[0])
- y_pred = y_pred.apply(lambda x: x[0])
- y_true = y_true.explode()
- y_pred = y_pred.explode()
+ if is_mutli_label:
+ kwargs["is_multi_label"] = is_mutli_label
+
+ else:
+ y_true = y_true.apply(lambda x: x[0])
+ y_pred = y_pred.apply(lambda x: x[0])
+
+ y_true = y_true.explode()
+ y_pred = y_pred.explode()
elif raw_data_copy[0].task == "question-answering":
from ..utils.custom_types.helpers import build_qa_input, build_qa_prompt
@@ -254,7 +265,7 @@ class BaseAccuracy(ABC):
transform(data: List[Sample]) -> Any: Transforms the input data into an output based on the implemented accuracy measure.
"""
- test_types = defaultdict(lambda: BaseAccuracy)
+ test_types: DefaultDict[str, Type["BaseAccuracy"]] = defaultdict(lambda: BaseAccuracy)
alias_name = None
supported_tasks = ["ner", "text-classification"]
@@ -374,7 +385,13 @@ async def run(
y_pred (List[Any]): Predicted values
"""
progress = kwargs.get("progress_bar", False)
- df_metrics = classification_report(y_true, y_pred, zero_division=0)
+ is_multi_label = kwargs.get("is_multi_label", False)
+ if is_multi_label:
+ df_metrics = classification_report_multi_label(
+ y_true, y_pred, zero_division=0
+ )
+ else:
+ df_metrics = classification_report(y_true, y_pred, zero_division=0)
df_metrics.pop("macro avg")
for idx, sample in enumerate(sample_list):
@@ -454,7 +471,13 @@ async def run(
"""
progress = kwargs.get("progress_bar", False)
- df_metrics = classification_report(y_true, y_pred, zero_division=0)
+ is_multi_label = kwargs.get("is_multi_label", False)
+ if is_multi_label:
+ df_metrics = classification_report_multi_label(
+ y_true, y_pred, zero_division=0
+ )
+ else:
+ df_metrics = classification_report(y_true, y_pred, zero_division=0)
df_metrics.pop("macro avg")
for idx, sample in enumerate(sample_list):
@@ -531,8 +554,13 @@ async def run(
"""
progress = kwargs.get("progress_bar", False)
-
- df_metrics = classification_report(y_true, y_pred, zero_division=0)
+ is_multi_label = kwargs.get("is_multi_label", False)
+ if is_multi_label:
+ df_metrics = classification_report_multi_label(
+ y_true, y_pred, zero_division=0
+ )
+ else:
+ df_metrics = classification_report(y_true, y_pred, zero_division=0)
df_metrics.pop("macro avg")
for idx, sample in enumerate(sample_list):
@@ -599,8 +627,14 @@ async def run(
"""
progress = kwargs.get("progress_bar", False)
+ is_multi_label = kwargs.get("is_multi_label", False)
- f1 = calculate_f1_score(y_true, y_pred, average="micro", zero_division=0)
+ if is_multi_label:
+ f1 = calculate_f1_score_multi_label(
+ y_true, y_pred, average="micro", zero_division=0
+ )
+ else:
+ f1 = calculate_f1_score(y_true, y_pred, average="micro", zero_division=0)
for sample in sample_list:
sample.actual_results = MinScoreOutput(min_score=f1)
@@ -664,7 +698,14 @@ async def run(
"""
progress = kwargs.get("progress_bar", False)
- f1 = calculate_f1_score(y_true, y_pred, average="macro", zero_division=0)
+ is_multi_label = kwargs.get("is_multi_label", False)
+
+ if is_multi_label:
+ f1 = calculate_f1_score_multi_label(
+ y_true, y_pred, average="macro", zero_division=0
+ )
+ else:
+ f1 = calculate_f1_score(y_true, y_pred, average="macro", zero_division=0)
for sample in sample_list:
sample.actual_results = MinScoreOutput(min_score=f1)
@@ -726,7 +767,14 @@ async def run(
"""
progress = kwargs.get("progress_bar", False)
- f1 = calculate_f1_score(y_true, y_pred, average="weighted", zero_division=0)
+ is_multi_label = kwargs.get("is_multi_label", False)
+
+ if is_multi_label:
+ f1 = calculate_f1_score_multi_label(
+ y_true, y_pred, average="weighted", zero_division=0
+ )
+ else:
+ f1 = calculate_f1_score(y_true, y_pred, average="weighted", zero_division=0)
for sample in sample_list:
sample.actual_results = MinScoreOutput(min_score=f1)
diff --git a/langtest/transform/fairness.py b/langtest/transform/fairness.py
index eb72b5e89..a352bae98 100644
--- a/langtest/transform/fairness.py
+++ b/langtest/transform/fairness.py
@@ -15,7 +15,7 @@
SequenceClassificationSample,
Sample,
)
-from langtest.utils.util_metrics import calculate_f1_score
+from langtest.utils.util_metrics import calculate_f1_score, calculate_f1_score_multi_label
from langtest.utils.custom_types.helpers import default_user_prompt
from langtest.errors import Errors
from langtest.transform.base import ITests
@@ -138,6 +138,7 @@ def predict_ner(sample: Sample):
)
elif isinstance(data[0], SequenceClassificationSample):
+ is_mutli_label = raw_data_copy[0].expected_results.multi_label
def predict_text_classification(sample: Sample):
prediction = model.predict(sample.original)
@@ -154,11 +155,16 @@ def predict_text_classification(sample: Sample):
y_pred = pd.Series(data).apply(
lambda x: [y.label for y in x.actual_results.predictions]
)
- y_true = y_true.apply(lambda x: x[0])
- y_pred = y_pred.apply(lambda x: x[0])
- y_true = y_true.explode()
- y_pred = y_pred.explode()
+ if is_mutli_label:
+ kwargs["is_multi_label"] = is_mutli_label
+
+ else:
+ y_true = y_true.apply(lambda x: x[0])
+ y_pred = y_pred.apply(lambda x: x[0])
+
+ y_true = y_true.explode()
+ y_pred = y_pred.explode()
elif data[0].task == "question-answering":
from ..utils.custom_types.helpers import (
@@ -406,12 +412,26 @@ async def run(
List[MinScoreSample]: The evaluated data samples.
"""
progress = kwargs.get("progress_bar", False)
+
+ is_multi_label = kwargs.get("is_multi_label", False)
+
for sample in sample_list:
data = grouped_label[sample.test_case]
if len(data[0]) > 0:
- macro_f1_score = calculate_f1_score(
- data[0].to_list(), data[1].to_list(), average="macro", zero_division=0
- )
+ if is_multi_label:
+ macro_f1_score = calculate_f1_score_multi_label(
+ data[0].to_list(),
+ data[1].to_list(),
+ average="macro",
+ zero_division=0,
+ )
+ else:
+ macro_f1_score = calculate_f1_score(
+ data[0].to_list(),
+ data[1].to_list(),
+ average="macro",
+ zero_division=0,
+ )
else:
macro_f1_score = 1
@@ -493,13 +513,25 @@ async def run(
List[MaxScoreSample]: The evaluated data samples.
"""
progress = kwargs.get("progress_bar", False)
+ is_multi_label = kwargs.get("is_multi_label", False)
for sample in sample_list:
data = grouped_label[sample.test_case]
if len(data[0]) > 0:
- macro_f1_score = calculate_f1_score(
- data[0].to_list(), data[1].to_list(), average="macro", zero_division=0
- )
+ if is_multi_label:
+ macro_f1_score = calculate_f1_score_multi_label(
+ data[0].to_list(),
+ data[1].to_list(),
+ average="macro",
+ zero_division=0,
+ )
+ else:
+ macro_f1_score = calculate_f1_score(
+ data[0].to_list(),
+ data[1].to_list(),
+ average="macro",
+ zero_division=0,
+ )
else:
macro_f1_score = 1
diff --git a/langtest/transform/image/__init__.py b/langtest/transform/image/__init__.py
new file mode 100644
index 000000000..f02586ce0
--- /dev/null
+++ b/langtest/transform/image/__init__.py
@@ -0,0 +1,3 @@
from .robustness import ImageResizing, ImageRotation, ImageBlur, ImageNoise

# __all__ must contain export *names* (strings), not the class objects
# themselves: `from langtest.transform.image import *` iterates __all__ and
# raises TypeError on non-string entries.
__all__ = ["ImageResizing", "ImageRotation", "ImageBlur", "ImageNoise"]
diff --git a/langtest/transform/image/robustness.py b/langtest/transform/image/robustness.py
new file mode 100644
index 000000000..3444abfe9
--- /dev/null
+++ b/langtest/transform/image/robustness.py
@@ -0,0 +1,286 @@
+import random
+from typing import List, Tuple, Union
+from langtest.logger import logger
+from langtest.transform.robustness import BaseRobustness
+from langtest.utils.custom_types.sample import Sample
+from PIL import Image, ImageFilter
+
+
class ImageResizing(BaseRobustness):
    """Robustness test that shrinks or resizes each sample's image."""

    alias_name = "image_resize"
    supported_tasks = ["visualqa"]

    @staticmethod
    def transform(
        sample_list: List[Sample],
        resize: Union[float, Tuple[int, int]] = 0.5,
        *args,
        **kwargs,
    ) -> List[Sample]:
        """Resize every sample's original image into ``perturbed_image``.

        Args:
            sample_list: Samples carrying an ``original_image``.
            resize: Either a scale factor (float) applied to both dimensions,
                or an explicit (width, height) tuple.

        Returns:
            List[Sample]: The same list, with category/test_type set and
            ``perturbed_image`` populated on each sample.
        """
        for sample in sample_list:
            sample.category = "robustness"
            sample.test_type = "image_resize"
            source = sample.original_image
            if isinstance(resize, float):
                # Uniformly scale both dimensions by the same factor.
                target = (int(source.width * resize), int(source.height * resize))
            else:
                # Explicit (width, height) tuple is used verbatim.
                target = resize
            sample.perturbed_image = source.resize(target)

        return sample_list
+
+
class ImageRotation(BaseRobustness):
    """Robustness test that rotates each sample's image."""

    alias_name = "image_rotate"
    supported_tasks = ["visualqa"]

    @staticmethod
    def transform(
        sample_list: List[Sample], angle: int = 90, exapand=True, *args, **kwargs
    ) -> List[Sample]:
        """Rotate every sample's original image into ``perturbed_image``.

        Args:
            sample_list: Samples carrying an ``original_image``.
            angle: Counter-clockwise rotation angle in degrees.
            exapand: When True, expand the canvas so the rotated image is not
                cropped. (The misspelled name is kept for backward
                compatibility; previously this argument was accepted but
                silently ignored in favor of a hard-coded ``expand=True``.)

        Returns:
            List[Sample]: The same list, with ``perturbed_image`` populated.
        """
        for sample in sample_list:
            sample.category = "robustness"
            sample.test_type = "image_rotate"
            # Honor the caller-supplied expand flag (default preserves the old
            # behavior of expanding the canvas).
            sample.perturbed_image = sample.original_image.rotate(
                angle, expand=exapand
            )

        return sample_list
+
+
class ImageBlur(BaseRobustness):
    """Robustness test that applies a Gaussian blur to each sample's image."""

    alias_name = "image_blur"
    supported_tasks = ["visualqa"]

    @staticmethod
    def transform(
        sample_list: List[Sample], radius: int = 2, *args, **kwargs
    ) -> List[Sample]:
        """Blur every sample's original image into ``perturbed_image``.

        Args:
            sample_list: Samples carrying an ``original_image``.
            radius: Gaussian blur radius passed to PIL's filter.

        Returns:
            List[Sample]: The same list, with ``perturbed_image`` populated.
        """
        # The filter object is stateless, so build it once and reuse it.
        gaussian = ImageFilter.GaussianBlur(radius)
        for sample in sample_list:
            sample.category = "robustness"
            sample.test_type = "image_blur"
            sample.perturbed_image = sample.original_image.filter(gaussian)

        return sample_list
+
+
class ImageNoise(BaseRobustness):
    """Robustness test that adds per-pixel Gaussian noise to each image."""

    alias_name = "image_noise"
    supported_tasks = ["visualqa"]

    @classmethod
    def transform(
        cls, sample_list: List[Sample], noise: float = 0.1, *args, **kwargs  # Noise level
    ) -> List[Sample]:
        """Add Gaussian noise to every sample's original image.

        Args:
            sample_list: Samples carrying an ``original_image``.
            noise: Noise level in [0, 1]; scales the std-dev of the per-channel
                Gaussian noise (std = 255 * noise).

        Returns:
            List[Sample]: The same list, with ``perturbed_image`` populated.

        Raises:
            ValueError: If the noise level is outside [0, 1].
        """
        try:
            # Harness configs pass this parameter as "noise_level" (see the
            # example YAML/configure() usage); accept it as an alias so the
            # configured value is not silently ignored in **kwargs.
            noise = kwargs.get("noise_level", noise)

            if noise < 0 or noise > 1:
                raise ValueError("Noise level must be in the range [0, 1].")

            # Get image size
            for sample in sample_list:
                sample.category = "robustness"
                sample.test_type = "image_noise"
                sample.perturbed_image = cls.add_noise(
                    image=sample.original_image, noise_level=noise
                )
            return sample_list

        except Exception as e:
            logger.error(f"Error in adding noise to the image: {e}")
            raise e

    @staticmethod
    def add_noise(image: Image.Image, noise_level: float) -> Image.Image:
        """Return a copy of *image* with Gaussian noise added to each pixel.

        Supports 'L' (grayscale) and 'RGB' modes; each channel receives
        independent Gaussian noise with std-dev 255 * noise_level, clipped
        to [0, 255].
        """
        width, height = image.size

        # Work on a copy so the original image on the sample is untouched.
        noisy_image = image.copy()
        pixels = noisy_image.load()  # Access pixel data

        if image.mode == "L":  # Grayscale image
            for x in range(width):
                for y in range(height):
                    gray = image.getpixel((x, y))

                    # Add noise and clip the value to stay in [0, 255].
                    noise_gray = int(random.gauss(0, 255 * noise_level))
                    pixels[x, y] = max(0, min(255, gray + noise_gray))

        elif image.mode == "RGB":  # Color image
            for x in range(width):
                for y in range(height):
                    r, g, b = image.getpixel((x, y))

                    # Independent noise per channel, clipped to [0, 255].
                    pixels[x, y] = tuple(
                        max(0, min(255, value + int(random.gauss(0, 255 * noise_level))))
                        for value in (r, g, b)
                    )

        else:
            raise ValueError("The input image must be in 'L' (grayscale) or 'RGB' mode.")

        return noisy_image
+
+
class ImageConstrast(BaseRobustness):
    """Robustness perturbation that rescales image contrast.

    NOTE(review): the class name keeps the original 'ImageConstrast'
    misspelling so existing references do not break.
    """

    alias_name = "image_contrast"
    supported_tasks = ["visualqa"]

    @staticmethod
    def transform(
        sample_list: List[Sample], contrast_factor: float = 0.5, *args, **kwargs
    ) -> List[Sample]:
        """Scale the contrast of every sample's image by ``contrast_factor``.

        Args:
            sample_list: Samples whose ``original_image`` will be perturbed.
            contrast_factor: Non-negative contrast multiplier (1.0 = no change).

        Raises:
            ValueError: If ``contrast_factor`` is negative.
        """
        from PIL import ImageEnhance

        if contrast_factor < 0:
            raise ValueError("Contrast factor must be above 0.")

        for item in sample_list:
            item.category = "robustness"
            item.test_type = "image_contrast"
            item.perturbed_image = ImageEnhance.Contrast(item.original_image).enhance(
                contrast_factor
            )
        return sample_list
+
+
class ImageBrightness(BaseRobustness):
    """Robustness perturbation that rescales image brightness."""

    alias_name = "image_brightness"
    supported_tasks = ["visualqa"]

    @staticmethod
    def transform(
        sample_list: List[Sample], brightness_factor: float = 0.3, *args, **kwargs
    ) -> List[Sample]:
        """Scale the brightness of every sample's image.

        Args:
            sample_list: Samples whose ``original_image`` will be perturbed.
            brightness_factor: Non-negative brightness multiplier
                (1.0 = no change, 0.0 = black image).

        Raises:
            ValueError: If ``brightness_factor`` is negative.
        """
        from PIL import ImageEnhance

        if brightness_factor < 0:
            raise ValueError("Brightness factor must be above 0.")

        for item in sample_list:
            item.category = "robustness"
            item.test_type = "image_brightness"
            item.perturbed_image = ImageEnhance.Brightness(item.original_image).enhance(
                brightness_factor
            )
        return sample_list
+
+
class ImageSharpness(BaseRobustness):
    """Robustness perturbation that rescales image sharpness."""

    alias_name = "image_sharpness"
    supported_tasks = ["visualqa"]

    @staticmethod
    def transform(
        sample_list: List[Sample], sharpness_factor: float = 1.5, *args, **kwargs
    ) -> List[Sample]:
        """Scale the sharpness of every sample's image.

        Args:
            sample_list: Samples whose ``original_image`` will be perturbed.
            sharpness_factor: Non-negative sharpness multiplier
                (1.0 = no change, <1 blurs, >1 sharpens).

        Raises:
            ValueError: If ``sharpness_factor`` is negative.
        """
        from PIL import ImageEnhance

        if sharpness_factor < 0:
            raise ValueError("Sharpness factor must be above 0.")

        for item in sample_list:
            item.category = "robustness"
            item.test_type = "image_sharpness"
            item.perturbed_image = ImageEnhance.Sharpness(item.original_image).enhance(
                sharpness_factor
            )
        return sample_list
+
+
class ImageColor(BaseRobustness):
    """Robustness perturbation that rescales image color saturation."""

    # Bug fix: removed a stray `3` expression statement that had been left
    # in the class body (a harmless but meaningless no-op).
    alias_name = "image_color"
    supported_tasks = ["visualqa"]

    @staticmethod
    def transform(
        sample_list: List[Sample], color_factor: float = 0, *args, **kwargs
    ) -> List[Sample]:
        """Scale the color saturation of every sample's image.

        Args:
            sample_list: Samples whose ``original_image`` will be perturbed.
            color_factor: Non-negative saturation multiplier
                (0.0 = grayscale, 1.0 = no change).

        Raises:
            ValueError: If ``color_factor`` is negative.
        """
        from PIL import ImageEnhance

        if color_factor < 0:
            raise ValueError("Color factor must be in the range [0, inf].")

        for sample in sample_list:
            sample.category = "robustness"
            sample.test_type = "image_color"
            enhancer = ImageEnhance.Color(sample.original_image)
            sample.perturbed_image = enhancer.enhance(color_factor)

        return sample_list
+
+
class ImageFlip(BaseRobustness):
    """Robustness perturbation that mirrors each image."""

    alias_name = "image_flip"
    supported_tasks = ["visualqa"]

    @staticmethod
    def transform(
        sample_list: List[Sample], flip: str = "horizontal", *args, **kwargs
    ) -> List[Sample]:
        """Mirror every sample's original image.

        Args:
            sample_list: Samples whose ``original_image`` will be perturbed.
            flip: Either 'horizontal' (left-right mirror) or 'vertical'
                (top-bottom mirror).

        Raises:
            ValueError: If ``flip`` is not one of the two supported modes.
        """
        if flip not in ["horizontal", "vertical"]:
            raise ValueError("Flip must be either 'horizontal' or 'vertical'.")

        # Resolve the PIL transpose constant once, outside the loop.
        method = Image.FLIP_LEFT_RIGHT if flip == "horizontal" else Image.FLIP_TOP_BOTTOM

        for item in sample_list:
            item.category = "robustness"
            item.test_type = "image_flip"
            item.perturbed_image = item.original_image.transpose(method)

        return sample_list
+
+
class ImageCrop(BaseRobustness):
    """Robustness perturbation that crops the top-left region of each image."""

    alias_name = "image_crop"
    supported_tasks = ["visualqa"]

    @staticmethod
    def transform(
        sample_list: List[Sample],
        crop_size: Union[float, Tuple[int, int]] = (100, 100),
        *args,
        **kwargs,
    ) -> List[Sample]:
        """Crop every sample's original image from the top-left corner.

        Args:
            sample_list: Samples whose ``original_image`` will be perturbed.
            crop_size: Either a float fraction of the image's width/height,
                or an explicit ``(width, height)`` tuple in pixels.

        Returns:
            The same list, with ``perturbed_image`` set on each sample.
        """
        for item in sample_list:
            item.category = "robustness"
            item.test_type = "image_crop"
            source = item.original_image
            if isinstance(crop_size, float):
                # Fractional crop, relative to the source dimensions.
                box = (0, 0, int(source.width * crop_size), int(source.height * crop_size))
            else:
                # Absolute pixel crop.
                box = (0, 0, crop_size[0], crop_size[1])
            item.perturbed_image = source.crop(box)

        return sample_list
diff --git a/langtest/transform/robustness.py b/langtest/transform/robustness.py
index ac3ed4fd7..9f735be4a 100644
--- a/langtest/transform/robustness.py
+++ b/langtest/transform/robustness.py
@@ -1938,3 +1938,227 @@ def randomize_ages(text):
perturbed_samples.append(s)
return perturbed_samples
+
+
class AddNewLines(BaseRobustness):
    """A class for adding new lines to the input text."""

    alias_name = "add_new_lines"
    supported_tasks = ["text-classification", "question-answering", "summarization"]

    @staticmethod
    def transform(
        sample_list: List[Sample],
        prob: Optional[float] = 1.0,
        count: int = 1,
        max_lines: int = 3,
    ) -> List[Sample]:
        """Transforms the given sample list by adding new lines to the input text.

        Args:
            sample_list (List[Sample]): The list of samples to transform.
            prob (Optional[float]): Per-token probability of being eligible
                for perturbation. Defaults to 1.0. (Bug fix: the docstring
                previously claimed a default of 0.2, which did not match the
                actual default.)
            count: Number of variations to create per sample.
            max_lines: Maximum number of newline characters appended to a token.

        Returns:
            List[Sample]: The transformed list of samples with new lines added.
        """

        def add_new_lines(text: str) -> Tuple[str, List[Transformation]]:
            """Append 1..max_lines newlines after a random subset of tokens."""
            transformations = []

            # Find all tokens (maximal non-whitespace runs) and their positions.
            tokens = []
            for match in re.finditer(r"\S+", text):
                tokens.append(
                    {"word": match.group(), "start": match.start(), "end": match.end()}
                )

            # Too short to perturb meaningfully.
            if len(tokens) < 5:
                return text, transformations

            # Each token is independently eligible with probability `prob`.
            transformed_indices = []
            for i, _ in enumerate(tokens):
                if random.random() < prob:
                    transformed_indices.append(i)

            # Bug fix: when nothing was selected (possible for prob < 1) the
            # division below raised ZeroDivisionError; return unchanged instead.
            if not transformed_indices:
                return text, transformations

            # Down-sample the eligible tokens to limit how many are perturbed.
            transformed_indices = random.sample(
                transformed_indices,
                min(
                    2 * len(text) // len(transformed_indices),
                    int(len(transformed_indices) * prob),
                ),
            )

            # Build the perturbed text and record transformations.
            perturbed_text = ""
            prev_end = 0
            for i, token in enumerate(tokens):
                # Add any intermediate spaces or punctuation.
                perturbed_text += text[prev_end : token["start"]]

                perturbed_start = len(perturbed_text)
                perturbed_word = token["word"]

                if i in transformed_indices:
                    perturbed_word += "\n" * max(1, random.randint(1, max_lines))
                    transformations.append(
                        Transformation(
                            original_span=Span(
                                start=token["start"], end=token["end"], word=token["word"]
                            ),
                            new_span=Span(
                                start=perturbed_start,
                                end=perturbed_start + len(perturbed_word),
                                word=perturbed_word,
                            ),
                            ignore=False,
                        )
                    )

                perturbed_text += perturbed_word
                prev_end = token["end"]

            # Add any remaining text after the last token.
            perturbed_text += text[prev_end:]

            return perturbed_text, transformations

        perturbed_samples = []
        for s in sample_list:
            for _ in range(count):
                sample = deepcopy(s)
                if isinstance(sample, str):
                    # Raw-string input: return the perturbed string directly.
                    sample, _ = add_new_lines(sample)
                else:
                    sample.test_case, transformations = add_new_lines(sample.original)
                    if sample.task in ("ner", "text-classification"):
                        sample.transformations = transformations
                    sample.category = "robustness"
                perturbed_samples.append(sample)
        return perturbed_samples
+
+
class AddTabs(BaseRobustness):
    """A class for adding tabs to the input text."""

    alias_name = "add_tabs"
    supported_tasks = ["text-classification", "question-answering", "summarization"]

    @staticmethod
    def transform(
        sample_list: List[Sample],
        prob: Optional[float] = 1.0,
        count: int = 1,
        max_tabs: int = 5,
    ) -> List[Sample]:
        """Transforms the given sample list by adding tabs to the input text.

        Args:
            sample_list (List[Sample]): The list of samples to transform.
            prob (Optional[float]): Per-token probability of receiving tabs.
                Defaults to 1.0, which means all tokens are perturbed.
            count: Number of variations to create per sample.
            max_tabs: Maximum number of tab characters inserted after a token.

        Returns:
            List[Sample]: The transformed list of samples with tabs added.
        """

        def add_tabs(text: str) -> Tuple[str, List[Transformation]]:
            """Insert 1..max_tabs tab characters after randomly chosen tokens.

            Returns:
                Tuple[str, List[Transformation]]: The modified text and the
                list of transformations applied.
            """
            transformations = []
            perturbed_text = ""
            prev_end = 0
            # Cleanup: the old `offset` counter was maintained but never read,
            # so it has been removed.

            # Find all tokens (maximal non-whitespace runs) and their positions.
            tokens = []
            for match in re.finditer(r"\S+", text):
                tokens.append(
                    {"word": match.group(), "start": match.start(), "end": match.end()}
                )

            # If there are too few tokens, no transformation is applied.
            if len(tokens) < 5:
                return text, transformations

            # Each token is independently selected with probability `prob`.
            transformed_indices = []
            for i, _ in enumerate(tokens):
                if random.random() < prob:
                    transformed_indices.append(i)

            # Build the perturbed text and record transformations.
            for i, token in enumerate(tokens):
                # Add any intermediate spaces or punctuation.
                perturbed_text += text[prev_end : token["start"]]

                perturbed_start = len(perturbed_text)
                perturbed_word = token["word"]

                if i in transformed_indices:
                    # Append a random number of tabs after the token.
                    tabs_to_insert = "\t" * random.randint(1, max_tabs)
                    perturbed_word_with_tabs = perturbed_word + tabs_to_insert

                    # Record the transformation details.
                    transformations.append(
                        Transformation(
                            original_span=Span(
                                start=token["start"], end=token["end"], word=token["word"]
                            ),
                            new_span=Span(
                                start=perturbed_start,
                                end=perturbed_start + len(perturbed_word_with_tabs),
                                word=perturbed_word_with_tabs,
                            ),
                            ignore=False,
                        )
                    )

                    perturbed_text += perturbed_word_with_tabs
                else:
                    # Token not selected: copy it through unchanged.
                    perturbed_text += perturbed_word

                prev_end = token["end"]  # Track the end of the current token

            # Add any remaining text after the last token.
            perturbed_text += text[prev_end:]

            return perturbed_text, transformations

        perturbed_samples = []
        for s in sample_list:
            for _ in range(count):
                sample = deepcopy(s)
                if isinstance(sample, str):
                    # Raw-string input: return the perturbed string directly.
                    sample, _ = add_tabs(sample)
                else:
                    sample.test_case, transformations = add_tabs(sample.original)
                    if sample.task in ("ner", "text-classification"):
                        sample.transformations = transformations
                    sample.category = "robustness"
                perturbed_samples.append(sample)
        return perturbed_samples
diff --git a/langtest/transform/safety.py b/langtest/transform/safety.py
index dbd7ca7d8..08caecaa9 100644
--- a/langtest/transform/safety.py
+++ b/langtest/transform/safety.py
@@ -1,5 +1,6 @@
import asyncio
from abc import ABC, abstractmethod
+from copy import deepcopy
from typing import Dict, List
from ..datahandler.datasource import DataFactory
@@ -7,6 +8,8 @@
from langtest.modelhandler.modelhandler import ModelAPI
from langtest.tasks.task import TaskManager
from langtest.transform.base import ITests
+from langtest.utils.custom_types.output import MaxScoreOutput
+from langtest.utils.custom_types import sample as samples
from langtest.utils.custom_types.sample import Sample
@@ -171,3 +174,104 @@ def transform(self, count: int = 50, *args, **kwargs) -> List[Sample]:
)}"""
return data
+
+
class InjectionProbalities(BaseSafetyTest):
    """Injection Probabilities Score test.

    Scores each sample's prompt with PromptGuard's indirect-injection score
    and compares it against a configured maximum. (Class and alias names
    keep the original 'probalities' misspelling for backward compatibility.
    Docstring moved to the top of the class so it becomes the real
    ``__doc__``; it previously sat after the attributes as a no-op string.)
    """

    alias_name = "injection_probalities_score"
    supported_tasks = ["text-classification", "question-answering"]

    def transform(self, max_score: float, *args, **kwargs) -> List[Sample]:
        """Build test samples with ``expected_results`` set to ``max_score``."""
        data = []
        for sample in self._data_handler:
            sample = deepcopy(sample)
            sample.category = "safety"
            sample.test_type = "injection_probalities_score"
            sample.expected_results = MaxScoreOutput(max_score=max_score)
            data.append(sample)

        return data

    @classmethod
    async def run(cls, sample_list: List[Sample], *args, **kwargs) -> List[Sample]:
        """Score every sample with PromptGuard and record the result.

        Bug fix: the first parameter of this ``@classmethod`` was named
        ``self``; it is now conventionally ``cls``. Also raises a clear
        ValueError for unsupported sample types instead of failing later
        with an unbound ``text`` variable.
        """
        from langtest.modelhandler.promptguard import PromptGuard

        prompt_guard = PromptGuard()

        output = []

        # Optional tqdm-style progress bar supplied by the caller.
        progress = kwargs.get("progress_bar", False)

        for sample in sample_list:
            if isinstance(sample, samples.BaseQASample):
                text = sample.get_prompt()
            elif isinstance(sample, samples.BaseSample):
                text = sample.original
            else:
                raise ValueError(f"Unsupported sample type: {type(sample).__name__}")

            result = prompt_guard.get_indirect_injection_score(text)

            sample.actual_results = MaxScoreOutput(max_score=float(result))
            sample.state = "done"
            output.append(sample)

            if progress:
                progress.update(1)
        return output
+
+
class JailBreakProbalities(BaseSafetyTest):
    """Jailbreak Probabilities test.

    Scores each sample's prompt with PromptGuard's jailbreak score and
    compares it against a configured maximum. (Class and alias names keep
    the original 'probalities' misspelling for backward compatibility.
    Docstring moved to the top of the class so it becomes the real
    ``__doc__``; it previously sat after the attributes as a no-op string.)
    """

    alias_name = "jailbreak_probalities_score"
    supported_tasks = ["text-classification", "question-answering"]

    def transform(self, max_score: float, *args, **kwargs) -> List[Sample]:
        """Build test samples with ``expected_results`` set to ``max_score``."""
        data = []
        for sample in self._data_handler:
            sample = deepcopy(sample)
            sample.category = "safety"
            sample.test_type = "jailbreak_probalities_score"
            sample.expected_results = MaxScoreOutput(max_score=max_score)
            data.append(sample)

        return data

    @classmethod
    async def run(
        cls, sample_list: List[Sample], model: ModelAPI, *args, **kwargs
    ) -> List[Sample]:
        """Score every sample with PromptGuard and record the result.

        Bug fix: the first parameter of this ``@classmethod`` was named
        ``self``; it is now conventionally ``cls``. Also raises a clear
        ValueError for unsupported sample types instead of failing later
        with an unbound ``text`` variable.
        """
        from langtest.modelhandler.promptguard import PromptGuard

        prompt_guard = PromptGuard()

        output = []

        # Optional tqdm-style progress bar supplied by the caller.
        progress = kwargs.get("progress_bar", False)

        for sample in sample_list:
            if isinstance(sample, samples.BaseQASample):
                text = sample.get_prompt()
            elif isinstance(sample, samples.BaseSample):
                text = sample.original
            else:
                raise ValueError(f"Unsupported sample type: {type(sample).__name__}")

            result = prompt_guard.get_jailbreak_score(text)

            sample.actual_results = MaxScoreOutput(max_score=float(result))
            sample.state = "done"

            output.append(sample)

            if progress:
                progress.update(1)
        return output
diff --git a/langtest/transform/utils.py b/langtest/transform/utils.py
index 4540155bf..0fc2dcd23 100644
--- a/langtest/transform/utils.py
+++ b/langtest/transform/utils.py
@@ -397,6 +397,8 @@ def filter_unique_samples(task: str, transformed_samples: list, test_name: str):
no_transformation_applied_tests[test_name] += 1
else:
no_transformation_applied_tests[test_name] = 1
+ elif task == "visualqa":
+ return transformed_samples, no_transformation_applied_tests
else:
for sample in transformed_samples:
if sample.original.replace(" ", "") != sample.test_case.replace(" ", ""):
diff --git a/langtest/utils/custom_types/__init__.py b/langtest/utils/custom_types/__init__.py
index 41d60e870..82e3e62f0 100644
--- a/langtest/utils/custom_types/__init__.py
+++ b/langtest/utils/custom_types/__init__.py
@@ -22,6 +22,7 @@
CrowsPairsSample,
StereoSetSample,
TextGenerationSample,
+ VisualQASample,
)
from .helpers import Span, Transformation
from .output import (
diff --git a/langtest/utils/custom_types/helpers.py b/langtest/utils/custom_types/helpers.py
index fa9e43f61..ac04cb20c 100644
--- a/langtest/utils/custom_types/helpers.py
+++ b/langtest/utils/custom_types/helpers.py
@@ -753,12 +753,23 @@ def __new__(cls):
def prepare_model_response(self, data):
"""check the model response"""
+ from langtest.utils.custom_types import SequenceClassificationSample, NERSample
- if data[0].task == "text-classification":
+ if (
+ isinstance(data[0], SequenceClassificationSample)
+ and data[0].task == "text-classification"
+ ):
for sample in data:
- sample.actual_results = sample.actual_results.predictions[0]
- sample.expected_results = sample.expected_results.predictions[0]
- elif data[0].task == "ner":
+ if (
+ hasattr(sample.expected_results, "multi_label")
+ and sample.expected_results.multi_label
+ ):
+ sample.actual_results = sample.actual_results
+ sample.expected_results = sample.expected_results
+ else:
+ sample.actual_results = sample.actual_results.predictions[0]
+ sample.expected_results = sample.expected_results.predictions[0]
+ elif isinstance(data[0], NERSample) and data[0].task == "ner":
for sample in data:
sample.actual_results = sample.actual_results.predictions
sample.expected_results = sample.expected_results.predictions
diff --git a/langtest/utils/custom_types/output.py b/langtest/utils/custom_types/output.py
index 6961e4b0f..619a71fcb 100644
--- a/langtest/utils/custom_types/output.py
+++ b/langtest/utils/custom_types/output.py
@@ -20,6 +20,8 @@ def to_str_list(self) -> str:
def __str__(self) -> str:
"""String representation"""
+ if self.multi_label:
+ return self.to_str_list()
labels = {elt.label: elt.score for elt in self.predictions}
return f"SequenceClassificationOutput(predictions={labels})"
@@ -54,11 +56,11 @@ def to_str_list(self) -> float:
def __repr__(self) -> str:
"""Printable representation"""
- return f"{self.min_score}"
+ return f"{self.min_score:.3f}"
def __str__(self) -> str:
"""String representation"""
- return f"{self.min_score}"
+ return f"{self.min_score:.3f}"
class MaxScoreOutput(BaseModel):
@@ -72,11 +74,19 @@ def to_str_list(self) -> float:
def __repr__(self) -> str:
"""Printable representation"""
- return f"{self.max_score}"
+ return f"{self.max_score:.3f}"
def __str__(self) -> str:
"""String representation"""
- return f"{self.max_score}"
+ return f"{self.max_score:.3f}"
+
+ def __eq__(self, other: "MaxScoreOutput") -> bool:
+ """Greater than comparison method."""
+ return self.max_score >= other.max_score
+
+ def __ge__(self, other: "MaxScoreOutput") -> bool:
+ """Greater than comparison method."""
+ return self.max_score >= other.max_score
class NEROutput(BaseModel):
diff --git a/langtest/utils/custom_types/sample.py b/langtest/utils/custom_types/sample.py
index 8477fb9bb..68bbd336b 100644
--- a/langtest/utils/custom_types/sample.py
+++ b/langtest/utils/custom_types/sample.py
@@ -3,12 +3,14 @@
import importlib
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union, Callable
from copy import deepcopy
+
+from langtest.modelhandler.modelhandler import ModelAPI
from ...errors import Errors
from pydantic import BaseModel, PrivateAttr, validator, Field
from .helpers import Transformation, Span
from .helpers import default_user_prompt
from ...metrics import EmbeddingDistance
-from .output import NEROutput, Result
+from .output import MaxScoreOutput, NEROutput, Result
from .predictions import NERPrediction
@@ -488,6 +490,32 @@ def run(self, model, **kwargs):
)
return tokens
+ def get_prompt(self):
+ """Returns the prompt for the sample"""
+ from .helpers import (
+ build_qa_input,
+ build_qa_prompt,
+ SimplePromptTemplate,
+ )
+
+ dataset_name = (
+ self.dataset_name.split("-")[0].lower()
+ if self.dataset_name
+ else "default_question_answering_prompt"
+ )
+
+ original_text_input = build_qa_input(
+ context=self.original_context,
+ question=self.original_question,
+ options=self.options,
+ )
+
+ prompt = build_qa_prompt(original_text_input, dataset_name)
+
+ query = SimplePromptTemplate(**prompt).format(**original_text_input)
+
+ return query
+
class QASample(BaseQASample):
"""A class representing a sample for the question answering task.
@@ -592,6 +620,9 @@ def is_pass(self) -> bool:
if self.ran_pass is not None:
return self.ran_pass
+ elif isinstance(self.expected_results, MaxScoreOutput):
+ self.ran_pass = self.expected_results >= self.actual_results
+ return self.ran_pass
else:
self.__update_params()
try:
@@ -2751,6 +2782,320 @@ class FillMaskSample(TextGenerationSample):
pass
class VisualQASample(BaseModel):
    """
    A class representing a sample for the Visual Question Answering task.

    Attributes:
        original_image: The original image used for the test (PIL image,
            path/URL string, or raw dict/bytes payload; normalized on init).
        perturbed_image: The perturbed image used for the test.
        question (str): The question asked about the image.
        ground_truth (str): The ground truth answer to the question.
        expected_results (str): Model answer on the original image.
        actual_results (str): Model answer on the perturbed image.
    """

    from PIL.Image import Image

    original_image: Union[Image, str, Any] = None
    perturbed_image: Union[Image, str, Any] = None
    question: str = None
    options: str = None
    ground_truth: str = None
    expected_results: str = None
    actual_results: str = None
    dataset_name: str = None
    category: str = None
    test_type: str = None
    # Bug fix: `state` and `task` were each declared twice; the duplicate
    # (shadowed) declarations have been removed.
    state: str = None
    task: str = Field(default="visualqa", const=True)
    metric_name: str = None
    config: Union[str, dict] = None
    ran_pass: bool = None
    distance_result: float = None
    eval_model: str = None
    feedback: str = None

    class Config:
        # PIL images are not pydantic-native types.
        arbitrary_types_allowed = True

    def __init__(self, **data):
        super().__init__(**data)
        # Normalize whatever was passed (bytes dict, URL, data URI, path,
        # or PIL image) into an RGB PIL image.
        self.original_image = self.__load_image(self.original_image)

    def to_dict(self) -> Dict[str, Any]:
        """
        Converts the VisualQASample object to a dictionary.

        Returns:
            Dict[str, Any]: A dictionary representation of the VisualQASample object.
        """
        self.__update_params()

        result = {
            "category": self.category,
            "test_type": self.test_type,
            "original_image": self.convert_image_to_html(self.original_image),
            "perturbed_image": self.convert_image_to_html(self.perturbed_image),
            "question": self.question,
        }

        if self.options is not None:
            result["options"] = self.options

        if self.state == "done":
            if self.expected_results is not None and self.actual_results is not None:
                result.update(
                    {
                        "expected_result": self.expected_results,
                        "actual_result": self.actual_results,
                        "pass": self.is_pass(),
                    }
                )
            # Surface evaluation extras depending on the configured metric.
            if "evaluation" in self.config and "metric" in self.config["evaluation"]:
                if self.config["evaluation"]["metric"].lower() == "prometheus_eval":
                    result.update({"feedback": self.feedback})
                elif self.config["evaluation"]["metric"].lower() != "llm_eval":
                    result.update({"eval_score": self.distance_result})

        return result

    def run(self, model: ModelAPI, **kwargs):
        """
        Run the VisualQASample test using the provided model.

        Queries the model twice with the same prompt: once with the original
        image (-> expected_results) and once with the perturbed image
        (-> actual_results).

        Args:
            model: The model used for VisualQASample testing.
            **kwargs: Additional keyword arguments for the model.

        Returns:
            bool: True
        """
        dataset_name = self.dataset_name.split("-")[0].lower()
        prompt_template = kwargs.get(
            "user_prompt",
            default_user_prompt.get(
                dataset_name,
                (
                    """You are an AI Vision bot specializing in providing accurate and concise answers to multiple-choice questions. You will be presented with a question and options. Choose the correct answer.

Example:

Question: What is the capital of France ?

Options:
A. Berlin
B. Madrid
C. Paris
D. Rome

Answer: C. Paris.

Example 2:

Question: What is in the image ?

Options:
A. Dog
B. Cat
C. Elephant
D. Ear

Answer: UnRecognizable.
"""
                    " Similary \n Question: {question}\nOptions: {options}\n Answer:"
                ),
            ),
        )

        server_prompt = kwargs.get("server_prompt", " ")

        text_dict = {
            "question": self.question,
        }
        input_variables = ["question"]

        if self.options is not None:
            text_dict["options"] = self.options
            input_variables.append("options")

        payload = {
            "text": text_dict,
            "prompt": {
                "template": prompt_template,
                "input_variables": input_variables,
            },
        }

        # convert the image to base64 url
        orig_image = self.convert_image_to_bas64_url(self.original_image)
        pred_image = self.convert_image_to_bas64_url(self.perturbed_image)

        self.expected_results = model(
            **payload,
            images=(orig_image,),
            server_prompt=server_prompt,
        )
        self.actual_results = model(
            **payload,
            images=(pred_image,),
            server_prompt=server_prompt,
        )
        return True

    def transform(self, func: Callable, params: Dict, **kwargs):
        """
        Transform the original image using a specified function.

        Args:
            func (Callable): The transformation function.
            params (Dict): Parameters for the transformation function.
            **kwargs: Additional keyword arguments for the transformation.
        """
        sens = [self.original_image]
        self.perturbed_image = func(sens, **params, **kwargs)
        # Category is derived from the module the perturbation lives in.
        self.category = func.__module__.split(".")[-1]

        return self

    def __load_image(self, image_path):
        """Normalize the supported image inputs into an RGB PIL image."""
        import requests
        import re
        import io
        import base64

        # Bug fix: the original imported the PIL `Image` *class*
        # (`from PIL.Image import Image`) and then called `Image.open(...)`,
        # which does not exist on the class — `open` is a function on the
        # `PIL.Image` module.
        from PIL import Image as PILImage

        if isinstance(image_path, dict) and "bytes" in image_path:
            image = PILImage.open(io.BytesIO(image_path["bytes"]))
        elif isinstance(image_path, str) and re.match(r"^https?://", image_path):
            response = requests.get(image_path)
            image = PILImage.open(io.BytesIO(response.content))
        elif isinstance(image_path, str) and re.match(r"^data:image", image_path):
            image = PILImage.open(io.BytesIO(base64.b64decode(image_path.split(",")[1])))
        elif isinstance(image_path, PILImage.Image):
            image = image_path
        else:
            image = PILImage.open(image_path)
        return image.convert("RGB")

    def convert_image_to_html(self, image: Image):
        """Render the image as an inline-base64 HTML <img> tag (thumbnail)."""
        import io
        import base64

        if image is not None:
            image = image.copy()
            buffered = io.BytesIO()
            image.thumbnail((200, 200))
            image.save(buffered, format="PNG")
            img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
            # Bug fix: the original returned an empty f-string, discarding
            # img_str entirely (the markup was presumably lost); return the
            # intended <img> tag.
            return f'<img src="data:image/png;base64,{img_str}">'

    def convert_image_to_bas64_url(self, image: Image):
        """Encode the image as a data-URI string suitable for vision models."""
        import io
        import base64

        if image is not None:
            image = image.copy()
            buffered = io.BytesIO()
            image.thumbnail((400, 400))
            image.save(buffered, format="PNG")
            img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
            return f"data:image/png;base64,{img_str}"

    def __update_params(self):
        """Pull evaluation config (and eval model, once done) from the harness."""
        from ...langtest import HARNESS_CONFIG as harness_config

        self.config = harness_config
        self.metric_name = (
            self.config.get("evaluation", {}).get("metric", "llm_eval").lower()
        )

        if self.state == "done":
            from ...langtest import EVAL_MODEL

            if (
                "evaluation" in harness_config
                and "metric" in harness_config["evaluation"]
            ):
                if harness_config["evaluation"]["metric"].lower() == "llm_eval":
                    model = harness_config["evaluation"].get("model", None)
                    hub = harness_config["evaluation"].get("hub", None)
                    if model and hub:
                        from ...tasks import TaskManager

                        load_eval_model = TaskManager(self.task)
                        self.eval_model = load_eval_model.model(
                            model, hub, **harness_config.get("model_parameters", {})
                        )

            else:
                self.eval_model = EVAL_MODEL

    def is_pass(self) -> bool:
        """Checks if the sample has passed the evaluation.

        Returns:
            bool: True if the sample passed the evaluation, False otherwise.
        """
        if self.ran_pass is not None:
            # Cached verdict from a previous call.
            return self.ran_pass
        elif self.expected_results.strip().lower() == self.actual_results.strip().lower():
            # Exact (case/whitespace-insensitive) match short-circuits the metric.
            self.ran_pass = True
            return True
        else:
            self.__update_params()
            try:
                metric_module = importlib.import_module(
                    "langtest.utils.custom_types.helpers"
                )
                metric_function = getattr(metric_module, f"is_pass_{self.metric_name}")
            except (ImportError, AttributeError):
                raise ValueError(f"Metric '{self.metric_name}' not found.")

            if self.metric_name == "string_distance":
                selected_distance = self.config["evaluation"].get("distance", "jaro")
                threshold = self.config["evaluation"].get("threshold")

            elif self.metric_name == "embedding_distance":
                selected_distance = self.config["evaluation"].get("distance", "cosine")
                threshold = self.config["evaluation"].get("threshold")

            if self.metric_name in (
                "string_distance",
                "embedding_distance",
            ):
                self.distance_result, result = metric_function(
                    answer=self.expected_results,
                    prediction=self.actual_results,
                    selected_distance=selected_distance,
                    threshold=threshold,
                )
                self.ran_pass = result
                return result
            elif self.metric_name == "llm_eval":
                if isinstance(self.eval_model, dict):
                    self.eval_model = list(self.eval_model.values())[-1]
                result = metric_function(
                    eval_model=self.eval_model,
                    dataset_name=self.dataset_name,
                    original_question=" " + self.question,
                    answer=self.expected_results,
                    perturbed_question=" " + self.question,
                    prediction=self.actual_results,
                )

                self.ran_pass = result
                return result

            else:
                raise ValueError(f"Metric '{self.metric_name}' not found.")
+
+
Sample = TypeVar(
"Sample",
MaxScoreSample,
@@ -2772,4 +3117,5 @@ class FillMaskSample(TextGenerationSample):
LegalSample,
CrowsPairsSample,
StereoSetSample,
+ VisualQASample,
)
diff --git a/langtest/utils/util_metrics.py b/langtest/utils/util_metrics.py
index 4fd960a0f..c55cb377d 100644
--- a/langtest/utils/util_metrics.py
+++ b/langtest/utils/util_metrics.py
@@ -1,10 +1,14 @@
from collections import Counter
-from typing import List, Union, Dict
+from typing import List, Set, Union, Dict
from ..errors import Errors
+import pandas as pd
def classification_report(
- y_true: List[Union[str, int]], y_pred: List[Union[str, int]], zero_division: int = 0
+ y_true: List[Union[str, int]],
+ y_pred: List[Union[str, int]],
+ zero_division: int = 0,
+ multi_label: bool = False,
) -> Dict[str, Dict[str, Union[float, int]]]:
"""Generate a classification report including precision, recall, f1-score, and support.
@@ -96,7 +100,15 @@ def calculate_f1_score(
"""
assert len(y_true) == len(y_pred), "Lengths of y_true and y_pred must be equal."
- unique_labels = set(y_true + y_pred)
+ if isinstance(y_true, list) and isinstance(y_pred, list):
+ unique_labels = set(y_true + y_pred)
+ elif isinstance(y_true, pd.Series) and isinstance(y_pred, pd.Series):
+ unique_labels = set(y_true.tolist() + y_pred.tolist())
+ else:
+ raise ValueError(
+ "y_true and y_pred must be of the same type. Supported types are list and pandas Series."
+ )
+
num_classes = len(unique_labels)
if average == "micro":
@@ -170,3 +182,222 @@ def calculate_f1_score(
else:
raise ValueError(Errors.E074)
return f1_score
+
+
def simple_multilabel_binarizer(y_true, y_pred):
    """
    Binarize multi-label targets without any external dependency.

    Args:
        y_true (list of lists or sets): Actual labels for the data.
        y_pred (list of lists or sets): Predicted labels for the data.

    Returns:
        binarized_y_true (list of lists): Binary indicator matrix of true labels.
        binarized_y_pred (list of lists): Binary indicator matrix of predicted labels.
        classes (list): Sorted list of all unique classes (labels).
    """
    # Gather every distinct label appearing in either the true or predicted sets
    # so both matrices share one consistent column order.
    label_universe = set()
    for label_group in y_true:
        label_universe.update(label_group)
    for label_group in y_pred:
        label_universe.update(label_group)
    classes = sorted(label_universe)

    def _binarize(rows):
        # One 0/1 indicator row per sample, columns aligned with `classes`.
        return [[int(cls in row) for cls in classes] for row in rows]

    return _binarize(y_true), _binarize(y_pred), classes
+
+
def classification_report_multi_label(
    y_true: List[Set[Union[str, int]]],
    y_pred: List[Set[Union[str, int]]],
    zero_division: int = 0,
) -> Dict[str, Dict[str, Union[float, int]]]:
    """
    Generate a classification report for multi-label classification.

    Args:
        y_true (List[Set[Union[str, int]]]): List of sets of true labels.
        y_pred (List[Set[Union[str, int]]]): List of sets of predicted labels.
        zero_division (int, optional): Value used for a metric whose
            denominator is zero. Defaults to 0.

    Returns:
        Dict[str, Dict[str, Union[float, int]]]: Per-class precision, recall,
        f1-score and support, plus a "macro avg" entry.
    """
    # Binarize the multi-label data into indicator matrices over a shared class list.
    y_true_bin, y_pred_bin, classes = simple_multilabel_binarizer(y_true, y_pred)

    report = {}
    for i, class_label in enumerate(classes):
        # support: occurrences of the class in y_true; predicted_labels: in y_pred.
        support = sum(row[i] for row in y_true_bin)
        predicted_labels = sum(row[i] for row in y_pred_bin)
        # True positives: samples where the class appears in both true and predicted.
        correct_predictions = sum(
            1
            for true_row, pred_row in zip(y_true_bin, y_pred_bin)
            if true_row[i] == pred_row[i] == 1
        )

        precision = (
            correct_predictions / predicted_labels
            if predicted_labels > 0
            else zero_division
        )
        recall = correct_predictions / support if support > 0 else zero_division
        f1_score = (
            (2 * precision * recall) / (precision + recall)
            if (precision + recall) > 0
            else zero_division
        )

        report[class_label] = {
            "precision": precision,
            "recall": recall,
            "f1-score": f1_score,
            "support": support,
        }

    # Macro averages. Guard against an empty report (both inputs contain only
    # empty label sets), which previously raised ZeroDivisionError.
    num_classes = len(report)
    if num_classes > 0:
        avg_precision = (
            sum(metrics["precision"] for metrics in report.values()) / num_classes
        )
        avg_recall = (
            sum(metrics["recall"] for metrics in report.values()) / num_classes
        )
        avg_f1_score = (
            sum(metrics["f1-score"] for metrics in report.values()) / num_classes
        )
    else:
        avg_precision = avg_recall = avg_f1_score = zero_division

    report["macro avg"] = {
        "precision": avg_precision,
        "recall": avg_recall,
        "f1-score": avg_f1_score,
        "support": len(y_true),
    }

    return report
+
+
def calculate_f1_score_multi_label(
    y_true: List[Set[Union[str, int]]],
    y_pred: List[Set[Union[str, int]]],
    average: str = "macro",
    zero_division: int = 0,
) -> float:
    """
    Calculate the F1 score for multi-label classification using binarized labels.

    Args:
        y_true (List[Set[Union[str, int]]]): List of sets of true labels.
        y_pred (List[Set[Union[str, int]]]): List of sets of predicted labels.
        average (str, optional): Method to calculate F1 score, can be 'micro',
            'macro', or 'weighted'. Defaults to 'macro'.
        zero_division (int, optional): Value to use when a precision/recall/F1
            ratio has a zero denominator. Defaults to 0.

    Returns:
        float: Calculated F1 score for multi-label classification.

    Raises:
        AssertionError: If lengths of y_true and y_pred are not equal.
        ValueError: If an invalid averaging method is provided.
    """
    assert len(y_true) == len(y_pred), "Lengths of y_true and y_pred must be equal."

    # Validate the averaging method up front, before doing any work.
    if average not in ("micro", "macro", "weighted"):
        raise ValueError(
            "Invalid averaging method. Must be 'micro', 'macro', or 'weighted'."
        )

    # Binarize the labels and get the consistent class list.
    y_true_bin, y_pred_bin, classes = simple_multilabel_binarizer(y_true, y_pred)
    num_classes = len(classes)

    # Per-class TP/FP/FN counts in a single pass over the indicator matrices
    # (previously each class re-scanned the whole matrix three times).
    true_pos = [0] * num_classes
    false_pos = [0] * num_classes
    false_neg = [0] * num_classes
    for true_row, pred_row in zip(y_true_bin, y_pred_bin):
        for j in range(num_classes):
            if pred_row[j] == 1 and true_row[j] == 1:
                true_pos[j] += 1
            elif pred_row[j] == 1:
                false_pos[j] += 1
            elif true_row[j] == 1:
                false_neg[j] += 1

    def _f1(tp: int, fp: int, fn: int) -> float:
        # F1 from raw counts. `zero_division` now substitutes for every 0/0
        # ratio, consistently for both the micro and per-class (macro/weighted)
        # scores; the per-class branch previously hard-coded 0.0, contradicting
        # the documented meaning of `zero_division`.
        precision = tp / (tp + fp) if (tp + fp) > 0 else zero_division
        recall = tp / (tp + fn) if (tp + fn) > 0 else zero_division
        return (
            2 * (precision * recall) / (precision + recall)
            if (precision + recall) > 0
            else zero_division
        )

    if average == "micro":
        # Pool all counts across classes, then compute one global F1.
        f1_score = _f1(sum(true_pos), sum(false_pos), sum(false_neg))
    else:
        # Support per class = number of samples carrying that class in y_true.
        supports = [true_pos[j] + false_neg[j] for j in range(num_classes)]
        total_support = sum(supports)
        f1_score = 0.0
        for j in range(num_classes):
            class_f1_score = _f1(true_pos[j], false_pos[j], false_neg[j])
            if average == "macro":
                f1_score += class_f1_score / num_classes
            else:  # weighted
                weight = supports[j] / total_support if total_support > 0 else 0
                f1_score += weight * class_f1_score

    return min(f1_score, 1.0)  # Defensive cap in case zero_division > 1.
diff --git a/poetry.lock b/poetry.lock
index eb8658718..b3655893c 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -13,31 +13,34 @@ files = [
[[package]]
name = "accelerate"
-version = "0.20.3"
+version = "0.33.0"
description = "Accelerate"
optional = true
-python-versions = ">=3.7.0"
+python-versions = ">=3.8.0"
files = [
- {file = "accelerate-0.20.3-py3-none-any.whl", hash = "sha256:147183e7a2215f7bd45a7af3b986a963daa8a61fa58b0912b9473049e011ad15"},
- {file = "accelerate-0.20.3.tar.gz", hash = "sha256:79a896978c20dac270083d42bf033f4c9a80dcdd6b946f1ca92d8d6d0f0f5ba9"},
+ {file = "accelerate-0.33.0-py3-none-any.whl", hash = "sha256:0a7f33d60ba09afabd028d4f0856dd19c5a734b7a596d637d9dd6e3d0eadbaf3"},
+ {file = "accelerate-0.33.0.tar.gz", hash = "sha256:11ba481ed6ea09191775df55ce464aeeba67a024bd0261a44b77b30fb439e26a"},
]
[package.dependencies]
-numpy = ">=1.17"
+huggingface-hub = ">=0.21.0"
+numpy = ">=1.17,<2.0.0"
packaging = ">=20.0"
psutil = "*"
pyyaml = "*"
-torch = ">=1.6.0"
+safetensors = ">=0.3.1"
+torch = ">=1.10.0"
[package.extras]
-dev = ["black (>=23.1,<24.0)", "datasets", "deepspeed", "evaluate", "hf-doc-builder (>=0.3.0)", "parameterized", "pytest", "pytest-subtests", "pytest-xdist", "rich", "ruff (>=0.0.241)", "scikit-learn", "scipy", "tqdm", "transformers", "urllib3 (<2.0.0)"]
-quality = ["black (>=23.1,<24.0)", "hf-doc-builder (>=0.3.0)", "ruff (>=0.0.241)", "urllib3 (<2.0.0)"]
+deepspeed = ["deepspeed (<=0.14.0)"]
+dev = ["bitsandbytes", "black (>=23.1,<24.0)", "datasets", "diffusers", "evaluate", "hf-doc-builder (>=0.3.0)", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "rich", "ruff (>=0.2.1,<0.3.0)", "scikit-learn", "scipy", "timm", "torchpippy (>=0.2.0)", "tqdm", "transformers"]
+quality = ["black (>=23.1,<24.0)", "hf-doc-builder (>=0.3.0)", "ruff (>=0.2.1,<0.3.0)"]
rich = ["rich"]
sagemaker = ["sagemaker"]
-test-dev = ["datasets", "deepspeed", "evaluate", "scikit-learn", "scipy", "tqdm", "transformers"]
-test-prod = ["parameterized", "pytest", "pytest-subtests", "pytest-xdist"]
-test-trackers = ["comet-ml", "tensorboard", "wandb"]
-testing = ["datasets", "deepspeed", "evaluate", "parameterized", "pytest", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", "tqdm", "transformers"]
+test-dev = ["bitsandbytes", "datasets", "diffusers", "evaluate", "scikit-learn", "scipy", "timm", "torchpippy (>=0.2.0)", "tqdm", "transformers"]
+test-prod = ["parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist"]
+test-trackers = ["comet-ml", "dvclive", "tensorboard", "wandb"]
+testing = ["bitsandbytes", "datasets", "diffusers", "evaluate", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", "timm", "torchpippy (>=0.2.0)", "tqdm", "transformers"]
[[package]]
name = "ai21"
@@ -412,6 +415,39 @@ numpy = [
{version = ">=1.19.0", markers = "python_version >= \"3.9\""},
]
+[[package]]
+name = "blosc2"
+version = "2.0.0"
+description = "Python wrapper for the C-Blosc2 library."
+optional = false
+python-versions = ">=3.8, <4"
+files = [
+ {file = "blosc2-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4085e5c1df186e1747d8a8578b0cc1c8b7668391d635e9f89e17156912fba85a"},
+ {file = "blosc2-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cec4c0570543921ce6b8c9ffdbf9f2170de37ecaf8e2b213e867e3130b81f205"},
+ {file = "blosc2-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fbecd7ef4876811719aa58d84e4b414f430f329162c18578b870f5e77b59864"},
+ {file = "blosc2-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96dd63eb7641594208e6a865fd60a0bdde24568a180180beb8af4d6608796f3a"},
+ {file = "blosc2-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:659e18d5e606a0ca4d80766f86d87e640818051911d01679eed11022243a7e4f"},
+ {file = "blosc2-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c58990ab2bcd0f412496acf1d05c65d955d963197bbaa57b10b2ace31c29181a"},
+ {file = "blosc2-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f206e6a01a8167b441bf886ff022eb20e0f085b09300f49f3018f566c14d918a"},
+ {file = "blosc2-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ec63269428aa3fb45f7d4881b2d11b428c4cb62e854caf54a767a64da4df83e"},
+ {file = "blosc2-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98ab1cd57f9d7422f1636a6b290f2940113ee8be26bfe3823e8c011826972b9c"},
+ {file = "blosc2-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:54c5614b18f9f01473758fa64e3bc699adbe31b307a45eca0e07fa2204e4d4a1"},
+ {file = "blosc2-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f465b8ab54ecde6b8654672a50a4c3699aafd8e2de0dfcd84ed53f8a34c1734a"},
+ {file = "blosc2-2.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0497793f55db0b75de08eb4c047a0bc5b96dbe5e405b53803dd3368e36336188"},
+ {file = "blosc2-2.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5c52649837d514669107c77e8f172e9e5ecfa030eef0d378bb47ce1689921c9"},
+ {file = "blosc2-2.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8eb02f67d4ed8ac8f0ce5f3c8cafc0059255bb6899fd35127e4076925640f239"},
+ {file = "blosc2-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:32c365bf744353103ed91dc1f03889de03b986588181601594aa7ee773818cb4"},
+ {file = "blosc2-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d98e850f0de55e15c402c6e27105ba850f8954e784e30a7f8bde89eb70a08574"},
+ {file = "blosc2-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6545d6d7e7365a2a3533d4bdf7095856443aed7d5ddc577ecd0e78083790bff1"},
+ {file = "blosc2-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c193959a984e7814833c8be6b9026d7744d2cff4d450476561583a87152e13e"},
+ {file = "blosc2-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea3396de7757092d502fb297a44a8d019d92e750e5aebcd9d39a157fde8785b3"},
+ {file = "blosc2-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:ef018926b55799cf23345127dde8f29356b4451b3e067e1b07f0d186213bd821"},
+ {file = "blosc2-2.0.0.tar.gz", hash = "sha256:f19b0b3674f6c825b490f00d8264b0c540c2cdc11ec7e81178d38b83c57790a1"},
+]
+
+[package.dependencies]
+msgpack = "*"
+
[[package]]
name = "boto3"
version = "1.34.93"
@@ -761,6 +797,81 @@ files = [
{file = "cymem-2.0.7.tar.gz", hash = "sha256:e6034badb5dd4e10344211c81f16505a55553a7164adc314c75bd80cf07e57a8"},
]
+[[package]]
+name = "cython"
+version = "3.0.11"
+description = "The Cython compiler for writing C extensions in the Python language."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+files = [
+ {file = "Cython-3.0.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:44292aae17524abb4b70a25111fe7dec1a0ad718711d47e3786a211d5408fdaa"},
+ {file = "Cython-3.0.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75d45fbc20651c1b72e4111149fed3b33d270b0a4fb78328c54d965f28d55e1"},
+ {file = "Cython-3.0.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d89a82937ce4037f092e9848a7bbcc65bc8e9fc9aef2bb74f5c15e7d21a73080"},
+ {file = "Cython-3.0.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a8ea2e7e2d3bc0d8630dafe6c4a5a89485598ff8a61885b74f8ed882597efd5"},
+ {file = "Cython-3.0.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cee29846471ce60226b18e931d8c1c66a158db94853e3e79bc2da9bd22345008"},
+ {file = "Cython-3.0.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eeb6860b0f4bfa402de8929833fe5370fa34069c7ebacb2d543cb017f21fb891"},
+ {file = "Cython-3.0.11-cp310-cp310-win32.whl", hash = "sha256:3699391125ab344d8d25438074d1097d9ba0fb674d0320599316cfe7cf5f002a"},
+ {file = "Cython-3.0.11-cp310-cp310-win_amd64.whl", hash = "sha256:d02f4ebe15aac7cdacce1a628e556c1983f26d140fd2e0ac5e0a090e605a2d38"},
+ {file = "Cython-3.0.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75ba1c70b6deeaffbac123856b8d35f253da13552207aa969078611c197377e4"},
+ {file = "Cython-3.0.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af91497dc098718e634d6ec8f91b182aea6bb3690f333fc9a7777bc70abe8810"},
+ {file = "Cython-3.0.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3999fb52d3328a6a5e8c63122b0a8bd110dfcdb98dda585a3def1426b991cba7"},
+ {file = "Cython-3.0.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d566a4e09b8979be8ab9f843bac0dd216c81f5e5f45661a9b25cd162ed80508c"},
+ {file = "Cython-3.0.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:46aec30f217bdf096175a1a639203d44ac73a36fe7fa3dd06bd012e8f39eca0f"},
+ {file = "Cython-3.0.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ddd1fe25af330f4e003421636746a546474e4ccd8f239f55d2898d80983d20ed"},
+ {file = "Cython-3.0.11-cp311-cp311-win32.whl", hash = "sha256:221de0b48bf387f209003508e602ce839a80463522fc6f583ad3c8d5c890d2c1"},
+ {file = "Cython-3.0.11-cp311-cp311-win_amd64.whl", hash = "sha256:3ff8ac1f0ecd4f505db4ab051e58e4531f5d098b6ac03b91c3b902e8d10c67b3"},
+ {file = "Cython-3.0.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:11996c40c32abf843ba652a6d53cb15944c88d91f91fc4e6f0028f5df8a8f8a1"},
+ {file = "Cython-3.0.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63f2c892e9f9c1698ecfee78205541623eb31cd3a1b682668be7ac12de94aa8e"},
+ {file = "Cython-3.0.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b14c24f1dc4c4c9d997cca8d1b7fb01187a218aab932328247dcf5694a10102"},
+ {file = "Cython-3.0.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8eed5c015685106db15dd103fd040948ddca9197b1dd02222711815ea782a27"},
+ {file = "Cython-3.0.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780f89c95b8aec1e403005b3bf2f0a2afa060b3eba168c86830f079339adad89"},
+ {file = "Cython-3.0.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a690f2ff460682ea985e8d38ec541be97e0977fa0544aadc21efc116ff8d7579"},
+ {file = "Cython-3.0.11-cp312-cp312-win32.whl", hash = "sha256:2252b5aa57621848e310fe7fa6f7dce5f73aa452884a183d201a8bcebfa05a00"},
+ {file = "Cython-3.0.11-cp312-cp312-win_amd64.whl", hash = "sha256:da394654c6da15c1d37f0b7ec5afd325c69a15ceafee2afba14b67a5df8a82c8"},
+ {file = "Cython-3.0.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4341d6a64d47112884e0bcf31e6c075268220ee4cd02223047182d4dda94d637"},
+ {file = "Cython-3.0.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:351955559b37e6c98b48aecb178894c311be9d731b297782f2b78d111f0c9015"},
+ {file = "Cython-3.0.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c02361af9bfa10ff1ccf967fc75159e56b1c8093caf565739ed77a559c1f29f"},
+ {file = "Cython-3.0.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6823aef13669a32caf18bbb036de56065c485d9f558551a9b55061acf9c4c27f"},
+ {file = "Cython-3.0.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6fb68cef33684f8cc97987bee6ae919eee7e18ee6a3ad7ed9516b8386ef95ae6"},
+ {file = "Cython-3.0.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:790263b74432cb997740d73665f4d8d00b9cd1cecbdd981d93591ddf993d4f12"},
+ {file = "Cython-3.0.11-cp313-cp313-win32.whl", hash = "sha256:e6dd395d1a704e34a9fac00b25f0036dce6654c6b898be6f872ac2bb4f2eda48"},
+ {file = "Cython-3.0.11-cp313-cp313-win_amd64.whl", hash = "sha256:52186101d51497519e99b60d955fd5cb3bf747c67f00d742e70ab913f1e42d31"},
+ {file = "Cython-3.0.11-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c69d5cad51388522b98a99b4be1b77316de85b0c0523fa865e0ea58bbb622e0a"},
+ {file = "Cython-3.0.11-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8acdc87e9009110adbceb7569765eb0980129055cc954c62f99fe9f094c9505e"},
+ {file = "Cython-3.0.11-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1dd47865f4c0a224da73acf83d113f93488d17624e2457dce1753acdfb1cc40c"},
+ {file = "Cython-3.0.11-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:301bde949b4f312a1c70e214b0c3bc51a3f955d466010d2f68eb042df36447b0"},
+ {file = "Cython-3.0.11-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:f3953d2f504176f929862e5579cfc421860c33e9707f585d70d24e1096accdf7"},
+ {file = "Cython-3.0.11-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:3f2b062f6df67e8a56c75e500ca330cf62c85ac26dd7fd006f07ef0f83aebfa3"},
+ {file = "Cython-3.0.11-cp36-cp36m-win32.whl", hash = "sha256:c3d68751668c66c7a140b6023dba5d5d507f72063407bb609d3a5b0f3b8dfbe4"},
+ {file = "Cython-3.0.11-cp36-cp36m-win_amd64.whl", hash = "sha256:bcd29945fafd12484cf37b1d84f12f0e7a33ba3eac5836531c6bd5283a6b3a0c"},
+ {file = "Cython-3.0.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4e9a8d92978b15a0c7ca7f98447c6c578dc8923a0941d9d172d0b077cb69c576"},
+ {file = "Cython-3.0.11-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:421017466e9260aca86823974e26e158e6358622f27c0f4da9c682f3b6d2e624"},
+ {file = "Cython-3.0.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d80a7232938d523c1a12f6b1794ab5efb1ae77ad3fde79de4bb558d8ab261619"},
+ {file = "Cython-3.0.11-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfa550d9ae39e827a6e7198076df763571cb53397084974a6948af558355e028"},
+ {file = "Cython-3.0.11-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:aedceb6090a60854b31bf9571dc55f642a3fa5b91f11b62bcef167c52cac93d8"},
+ {file = "Cython-3.0.11-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:473d35681d9f93ce380e6a7c8feb2d65fc6333bd7117fbc62989e404e241dbb0"},
+ {file = "Cython-3.0.11-cp37-cp37m-win32.whl", hash = "sha256:3379c6521e25aa6cd7703bb7d635eaca75c0f9c7f1b0fdd6dd15a03bfac5f68d"},
+ {file = "Cython-3.0.11-cp37-cp37m-win_amd64.whl", hash = "sha256:14701edb3107a5d9305a82d9d646c4f28bfecbba74b26cc1ee2f4be08f602057"},
+ {file = "Cython-3.0.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:598699165cfa7c6d69513ee1bffc9e1fdd63b00b624409174c388538aa217975"},
+ {file = "Cython-3.0.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0583076c4152b417a3a8a5d81ec02f58c09b67d3f22d5857e64c8734ceada8c"},
+ {file = "Cython-3.0.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52205347e916dd65d2400b977df4c697390c3aae0e96275a438cc4ae85dadc08"},
+ {file = "Cython-3.0.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:989899a85f0d9a57cebb508bd1f194cb52f0e3f7e22ac259f33d148d6422375c"},
+ {file = "Cython-3.0.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:53b6072a89049a991d07f42060f65398448365c59c9cb515c5925b9bdc9d71f8"},
+ {file = "Cython-3.0.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f988f7f8164a6079c705c39e2d75dbe9967e3dacafe041420d9af7b9ee424162"},
+ {file = "Cython-3.0.11-cp38-cp38-win32.whl", hash = "sha256:a1f4cbc70f6b7f0c939522118820e708e0d490edca42d852fa8004ec16780be2"},
+ {file = "Cython-3.0.11-cp38-cp38-win_amd64.whl", hash = "sha256:187685e25e037320cae513b8cc4bf9dbc4465c037051aede509cbbf207524de2"},
+ {file = "Cython-3.0.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0fc6fdd6fa493be7bdda22355689d5446ac944cd71286f6f44a14b0d67ee3ff5"},
+ {file = "Cython-3.0.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b1d1f6f94cc5d42a4591f6d60d616786b9cd15576b112bc92a23131fcf38020"},
+ {file = "Cython-3.0.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4ab2b92a3e6ed552adbe9350fd2ef3aa0cc7853cf91569f9dbed0c0699bbeab"},
+ {file = "Cython-3.0.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:104d6f2f2c827ccc5e9e42c80ef6773a6aa94752fe6bc5b24a4eab4306fb7f07"},
+ {file = "Cython-3.0.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:13062ce556a1e98d2821f7a0253b50569fdc98c36efd6653a65b21e3f8bbbf5f"},
+ {file = "Cython-3.0.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:525d09b3405534763fa73bd78c8e51ac8264036ce4c16d37dfd1555a7da6d3a7"},
+ {file = "Cython-3.0.11-cp39-cp39-win32.whl", hash = "sha256:b8c7e514075696ca0f60c337f9e416e61d7ccbc1aa879a56c39181ed90ec3059"},
+ {file = "Cython-3.0.11-cp39-cp39-win_amd64.whl", hash = "sha256:8948802e1f5677a673ea5d22a1e7e273ca5f83e7a452786ca286eebf97cee67c"},
+ {file = "Cython-3.0.11-py2.py3-none-any.whl", hash = "sha256:0e25f6425ad4a700d7f77cd468da9161e63658837d1bc34861a9861a4ef6346d"},
+ {file = "cython-3.0.11.tar.gz", hash = "sha256:7146dd2af8682b4ca61331851e6aebce9fe5158e75300343f80c07ca80b1faff"},
+]
+
[[package]]
name = "databricks-api"
version = "0.9.0"
@@ -794,6 +905,25 @@ requests = ">=2.17.3"
six = ">=1.10.0"
tabulate = ">=0.7.7"
+[[package]]
+name = "databricks-sdk"
+version = "0.32.1"
+description = "Databricks SDK for Python (Beta)"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "databricks_sdk-0.32.1-py3-none-any.whl", hash = "sha256:b91efdd0c9d49db3ce47d1ee1cbe322cf24189d426da46d1f74e2bfd4e352361"},
+ {file = "databricks_sdk-0.32.1.tar.gz", hash = "sha256:8af15b7f94b1ae609f91f4a6dac43f9ebdd786c1077050ffc5cd5ab5eda39d49"},
+]
+
+[package.dependencies]
+google-auth = ">=2.0,<3.0"
+requests = ">=2.28.1,<3"
+
+[package.extras]
+dev = ["autoflake", "databricks-connect", "ipython", "ipywidgets", "isort", "pycodestyle", "pyfakefs", "pytest", "pytest-cov", "pytest-mock", "pytest-rerunfailures", "pytest-xdist", "requests-mock", "wheel", "yapf"]
+notebook = ["ipython (>=8,<9)", "ipywidgets (>=8,<9)"]
+
[[package]]
name = "dataclasses"
version = "0.6"
@@ -969,14 +1099,14 @@ type = "url"
url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0.tar.gz"
[[package]]
-name = "entrypoints"
-version = "0.4"
-description = "Discover and load entry points from installed packages."
-optional = true
+name = "et-xmlfile"
+version = "1.1.0"
+description = "An implementation of lxml.xmlfile for the standard library"
+optional = false
python-versions = ">=3.6"
files = [
- {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"},
- {file = "entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4"},
+ {file = "et_xmlfile-1.1.0-py3-none-any.whl", hash = "sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada"},
+ {file = "et_xmlfile-1.1.0.tar.gz", hash = "sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c"},
]
[[package]]
@@ -1325,18 +1455,45 @@ smmap = ">=3.0.1,<6"
[[package]]
name = "gitpython"
-version = "3.1.32"
+version = "3.1.43"
description = "GitPython is a Python library used to interact with Git repositories"
optional = true
python-versions = ">=3.7"
files = [
- {file = "GitPython-3.1.32-py3-none-any.whl", hash = "sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f"},
- {file = "GitPython-3.1.32.tar.gz", hash = "sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6"},
+ {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"},
+ {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"},
]
[package.dependencies]
gitdb = ">=4.0.1,<5"
+[package.extras]
+doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"]
+test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"]
+
+[[package]]
+name = "google-auth"
+version = "2.34.0"
+description = "Google Authentication Library"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "google_auth-2.34.0-py2.py3-none-any.whl", hash = "sha256:72fd4733b80b6d777dcde515628a9eb4a577339437012874ea286bca7261ee65"},
+ {file = "google_auth-2.34.0.tar.gz", hash = "sha256:8eb87396435c19b20d32abd2f984e31c191a15284af72eb922f10e5bde9c04cc"},
+]
+
+[package.dependencies]
+cachetools = ">=2.0.0,<6.0"
+pyasn1-modules = ">=0.2.1"
+rsa = ">=3.1.4,<5"
+
+[package.extras]
+aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"]
+enterprise-cert = ["cryptography", "pyopenssl"]
+pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"]
+reauth = ["pyu2f (>=0.1.5)"]
+requests = ["requests (>=2.20.0,<3.0.0.dev0)"]
+
[[package]]
name = "graphene"
version = "3.3"
@@ -1537,13 +1694,13 @@ socks = ["socksio (==1.*)"]
[[package]]
name = "huggingface-hub"
-version = "0.21.3"
+version = "0.24.7"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
files = [
- {file = "huggingface_hub-0.21.3-py3-none-any.whl", hash = "sha256:b183144336fdf2810a8c109822e0bb6ef1fd61c65da6fb60e8c3f658b7144016"},
- {file = "huggingface_hub-0.21.3.tar.gz", hash = "sha256:26a15b604e4fc7bad37c467b76456543ec849386cbca9cd7e1e135f53e500423"},
+ {file = "huggingface_hub-0.24.7-py3-none-any.whl", hash = "sha256:a212c555324c8a7b1ffdd07266bb7e7d69ca71aa238d27b7842d65e9a26ac3e5"},
+ {file = "huggingface_hub-0.24.7.tar.gz", hash = "sha256:0ad8fb756e2831da0ac0491175b960f341fe06ebcf80ed6f8728313f95fc0207"},
]
[package.dependencies]
@@ -1556,16 +1713,17 @@ tqdm = ">=4.42.1"
typing-extensions = ">=3.7.4.3"
[package.extras]
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
hf-transfer = ["hf-transfer (>=0.1.4)"]
-inference = ["aiohttp", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)"]
-quality = ["mypy (==1.5.1)", "ruff (>=0.1.3)"]
+inference = ["aiohttp", "minijinja (>=1.0)"]
+quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"]
tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
-torch = ["safetensors", "torch"]
+tensorflow-testing = ["keras (<3.0)", "tensorflow"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
+torch = ["safetensors[torch]", "torch"]
typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"]
[[package]]
@@ -2370,49 +2528,72 @@ requests = "*"
[[package]]
name = "mlflow"
-version = "2.14.0"
+version = "2.16.2"
description = "MLflow is an open source platform for the complete machine learning lifecycle"
optional = true
python-versions = ">=3.8"
files = [
- {file = "mlflow-2.14.0-py3-none-any.whl", hash = "sha256:e4ef6b546af98edfa5e71b11915f72635bbbc833220bd663e0d86c13717176cf"},
- {file = "mlflow-2.14.0.tar.gz", hash = "sha256:36de3105d841fd9ada117e6c82d0f2a7a0c09b7e45b20923b2ec4ddf441e015f"},
+ {file = "mlflow-2.16.2-py3-none-any.whl", hash = "sha256:7ed8f1d27e719a19592d9582e4415aa76abb3de53c524d6b9c66cbf5e00a1023"},
+ {file = "mlflow-2.16.2.tar.gz", hash = "sha256:322512bcdd13d87039cd60ebcd4370ce16115fb5360905010978575202e57876"},
]
[package.dependencies]
alembic = "<1.10.0 || >1.10.0,<2"
-cachetools = ">=5.0.0,<6"
-click = ">=7.0,<9"
-cloudpickle = "<4"
docker = ">=4.0.0,<8"
-entrypoints = "<1"
Flask = "<4"
-gitpython = ">=3.1.9,<4"
graphene = "<4"
-gunicorn = {version = "<23", markers = "platform_system != \"Windows\""}
-importlib-metadata = ">=3.7.0,<4.7.0 || >4.7.0,<8"
+gunicorn = {version = "<24", markers = "platform_system != \"Windows\""}
Jinja2 = [
{version = ">=2.11,<4", markers = "platform_system != \"Windows\""},
{version = ">=3.0,<4", markers = "platform_system == \"Windows\""},
]
markdown = ">=3.3,<4"
matplotlib = "<4"
-numpy = "<2"
+mlflow-skinny = "2.16.2"
+numpy = "<3"
+pandas = "<3"
+pyarrow = ">=4.0.0,<18"
+scikit-learn = "<2"
+scipy = "<2"
+sqlalchemy = ">=1.4.0,<3"
+waitress = {version = "<4", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+aliyun-oss = ["aliyunstoreplugin"]
+databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "botocore", "google-cloud-storage (>=1.30.0)"]
+extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1,<1.4.0)", "mlserver-mlflow (>=1.2.0,!=1.3.1,<1.4.0)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"]
+gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"]
+genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"]
+jfrog = ["mlflow-jfrog-plugin"]
+langchain = ["langchain (>=0.1.0,<=0.2.15)"]
+sqlserver = ["mlflow-dbstore"]
+xethub = ["mlflow-xethub"]
+
+[[package]]
+name = "mlflow-skinny"
+version = "2.16.2"
+description = "MLflow is an open source platform for the complete machine learning lifecycle"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "mlflow_skinny-2.16.2-py3-none-any.whl", hash = "sha256:c6faf8bddcba3d2bbde45c954c89575b93c4bef1d5e7e026d98fd9966015038c"},
+ {file = "mlflow_skinny-2.16.2.tar.gz", hash = "sha256:c4064506ee8b590dea5dc3a139a890d50996d8ab511fbd34b0266bf69ebaa9d8"},
+]
+
+[package.dependencies]
+cachetools = ">=5.0.0,<6"
+click = ">=7.0,<9"
+cloudpickle = "<4"
+databricks-sdk = ">=0.20.0,<1"
+gitpython = ">=3.1.9,<4"
+importlib-metadata = ">=3.7.0,<4.7.0 || >4.7.0,<9"
opentelemetry-api = ">=1.9.0,<3"
opentelemetry-sdk = ">=1.9.0,<3"
packaging = "<25"
-pandas = "<3"
-protobuf = ">=3.12.0,<5"
-pyarrow = ">=4.0.0,<16"
-pytz = "<2025"
+protobuf = ">=3.12.0,<6"
pyyaml = ">=5.1,<7"
-querystring-parser = "<2"
requests = ">=2.17.3,<3"
-scikit-learn = "<2"
-scipy = "<2"
-sqlalchemy = ">=1.4.0,<3"
sqlparse = ">=0.4.0,<1"
-waitress = {version = "<4", markers = "platform_system == \"Windows\""}
[package.extras]
aliyun-oss = ["aliyunstoreplugin"]
@@ -2421,7 +2602,7 @@ extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (
gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"]
genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"]
jfrog = ["mlflow-jfrog-plugin"]
-langchain = ["langchain (>=0.1.0,<=0.2.3)"]
+langchain = ["langchain (>=0.1.0,<=0.2.15)"]
sqlserver = ["mlflow-dbstore"]
xethub = ["mlflow-xethub"]
@@ -2442,6 +2623,79 @@ docs = ["sphinx"]
gmpy = ["gmpy2 (>=2.1.0a4)"]
tests = ["pytest (>=4.6)"]
+[[package]]
+name = "msgpack"
+version = "1.1.0"
+description = "MessagePack serializer"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"},
+ {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"},
+ {file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"},
+ {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"},
+ {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"},
+ {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"},
+ {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"},
+ {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"},
+ {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"},
+ {file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"},
+ {file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"},
+ {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"},
+ {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"},
+ {file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"},
+ {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"},
+ {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"},
+ {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"},
+ {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"},
+ {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"},
+ {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"},
+ {file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"},
+ {file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"},
+ {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"},
+ {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"},
+ {file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"},
+ {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"},
+ {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"},
+ {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"},
+ {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"},
+ {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"},
+ {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"},
+ {file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"},
+ {file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"},
+ {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"},
+ {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"},
+ {file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"},
+ {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"},
+ {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"},
+ {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"},
+ {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"},
+ {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"},
+ {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"},
+ {file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"},
+ {file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"},
+ {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"},
+ {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"},
+ {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"},
+ {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"},
+ {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"},
+ {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"},
+ {file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"},
+ {file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"},
+ {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"},
+ {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"},
+ {file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"},
+ {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"},
+ {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"},
+ {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"},
+ {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"},
+ {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"},
+ {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"},
+ {file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"},
+ {file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"},
+ {file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"},
+]
+
[[package]]
name = "mslex"
version = "0.3.0"
@@ -2698,6 +2952,48 @@ files = [
[package.dependencies]
setuptools = "*"
+[[package]]
+name = "numexpr"
+version = "2.8.6"
+description = "Fast numerical expression evaluator for NumPy"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "numexpr-2.8.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80acbfefb68bd92e708e09f0a02b29e04d388b9ae72f9fcd57988aca172a7833"},
+ {file = "numexpr-2.8.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6e884687da8af5955dc9beb6a12d469675c90b8fb38b6c93668c989cfc2cd982"},
+ {file = "numexpr-2.8.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ef7e8aaa84fce3aba2e65f243d14a9f8cc92aafd5d90d67283815febfe43eeb"},
+ {file = "numexpr-2.8.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dee04d72307c09599f786b9231acffb10df7d7a74b2ce3681d74a574880d13ce"},
+ {file = "numexpr-2.8.6-cp310-cp310-win32.whl", hash = "sha256:211804ec25a9f6d188eadf4198dd1a92b2f61d7d20993c6c7706139bc4199c5b"},
+ {file = "numexpr-2.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:18b1804923cfa3be7bbb45187d01c0540c8f6df4928c22a0f786e15568e9ebc5"},
+ {file = "numexpr-2.8.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:95b9da613761e4fc79748535b2a1f58cada22500e22713ae7d9571fa88d1c2e2"},
+ {file = "numexpr-2.8.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:47b45da5aa25600081a649f5e8b2aa640e35db3703f4631f34bb1f2f86d1b5b4"},
+ {file = "numexpr-2.8.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84979bf14143351c2db8d9dd7fef8aca027c66ad9df9cb5e75c93bf5f7b5a338"},
+ {file = "numexpr-2.8.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d36528a33aa9c23743b3ea686e57526a4f71e7128a1be66210e1511b09c4e4e9"},
+ {file = "numexpr-2.8.6-cp311-cp311-win32.whl", hash = "sha256:681812e2e71ff1ba9145fac42d03f51ddf6ba911259aa83041323f68e7458002"},
+ {file = "numexpr-2.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:27782177a0081bd0aab229be5d37674e7f0ab4264ef576697323dd047432a4cd"},
+ {file = "numexpr-2.8.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ef6e8896457a60a539cb6ba27da78315a9bb31edb246829b25b5b0304bfcee91"},
+ {file = "numexpr-2.8.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e640bc0eaf1b59f3dde52bc02bbfda98e62f9950202b0584deba28baf9f36bbb"},
+ {file = "numexpr-2.8.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d126938c2c3784673c9c58d94e00b1570aa65517d9c33662234d442fc9fb5795"},
+ {file = "numexpr-2.8.6-cp37-cp37m-win32.whl", hash = "sha256:e93d64cd20940b726477c3cb64926e683d31b778a1e18f9079a5088fd0d8e7c8"},
+ {file = "numexpr-2.8.6-cp37-cp37m-win_amd64.whl", hash = "sha256:31cf610c952eec57081171f0b4427f9bed2395ec70ec432bbf45d260c5c0cdeb"},
+ {file = "numexpr-2.8.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b5f96c89aa0b1f13685ec32fa3d71028db0b5981bfd99a0bbc271035949136b3"},
+ {file = "numexpr-2.8.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c8f37f7a6af3bdd61f2efd1cafcc083a9525ab0aaf5dc641e7ec8fc0ae2d3aa1"},
+ {file = "numexpr-2.8.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38b8b90967026bbc36c7aa6e8ca3b8906e1990914fd21f446e2a043f4ee3bc06"},
+ {file = "numexpr-2.8.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1967c16f61c27df1cdc43ba3c0ba30346157048dd420b4259832276144d0f64e"},
+ {file = "numexpr-2.8.6-cp38-cp38-win32.whl", hash = "sha256:15469dc722b5ceb92324ec8635411355ebc702303db901ae8cc87f47c5e3a124"},
+ {file = "numexpr-2.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:95c09e814b0d6549de98b5ded7cdf7d954d934bb6b505432ff82e83a6d330bda"},
+ {file = "numexpr-2.8.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:aa0f661f5f4872fd7350cc9895f5d2594794b2a7e7f1961649a351724c64acc9"},
+ {file = "numexpr-2.8.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8e3e6f1588d6c03877cb3b3dcc3096482da9d330013b886b29cb9586af5af3eb"},
+ {file = "numexpr-2.8.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8564186aad5a2c88d597ebc79b8171b52fd33e9b085013e1ff2208f7e4b387e3"},
+ {file = "numexpr-2.8.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6a88d71c166e86b98d34701285d23e3e89d548d9f5ae3f4b60919ac7151949f"},
+ {file = "numexpr-2.8.6-cp39-cp39-win32.whl", hash = "sha256:c48221b6a85494a7be5a022899764e58259af585dff031cecab337277278cc93"},
+ {file = "numexpr-2.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:6d7003497d82ef19458dce380b36a99343b96a3bd5773465c2d898bf8f5a38f9"},
+ {file = "numexpr-2.8.6.tar.gz", hash = "sha256:6336f8dba3f456e41a4ffc3c97eb63d89c73589ff6e1707141224b930263260d"},
+]
+
+[package.dependencies]
+numpy = ">=1.13.3"
+
[[package]]
name = "numpy"
version = "1.24.4"
@@ -2775,6 +3071,20 @@ typing-extensions = ">=4.11,<5"
[package.extras]
datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
+[[package]]
+name = "openpyxl"
+version = "3.1.5"
+description = "A Python library to read/write Excel 2010 xlsx/xlsm files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "openpyxl-3.1.5-py2.py3-none-any.whl", hash = "sha256:5282c12b107bffeef825f4617dc029afaf41d0ea60823bbb665ef3079dc79de2"},
+ {file = "openpyxl-3.1.5.tar.gz", hash = "sha256:cf0e3cf56142039133628b5acffe8ef0c12bc902d2aadd3e0fe5878dc08d1050"},
+]
+
+[package.dependencies]
+et-xmlfile = "*"
+
[[package]]
name = "opentelemetry-api"
version = "1.25.0"
@@ -3034,7 +3344,7 @@ files = [
name = "pillow"
version = "10.0.0"
description = "Python Imaging Library (Fork)"
-optional = true
+optional = false
python-versions = ">=3.8"
files = [
{file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"},
@@ -3275,6 +3585,17 @@ files = [
[package.extras]
tests = ["pytest"]
+[[package]]
+name = "py-cpuinfo"
+version = "9.0.0"
+description = "Get CPU info with pure Python"
+optional = false
+python-versions = "*"
+files = [
+ {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"},
+ {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"},
+]
+
[[package]]
name = "py4j"
version = "0.10.9"
@@ -3323,6 +3644,31 @@ files = [
[package.dependencies]
numpy = ">=1.16.6"
+[[package]]
+name = "pyasn1"
+version = "0.6.1"
+description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"},
+ {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"},
+]
+
+[[package]]
+name = "pyasn1-modules"
+version = "0.4.1"
+description = "A collection of ASN.1-based protocols modules"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"},
+ {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"},
+]
+
+[package.dependencies]
+pyasn1 = ">=0.4.6,<0.7.0"
+
[[package]]
name = "pycodestyle"
version = "2.9.1"
@@ -3336,47 +3682,47 @@ files = [
[[package]]
name = "pydantic"
-version = "1.10.8"
+version = "1.10.11"
description = "Data validation and settings management using python type hints"
optional = false
python-versions = ">=3.7"
files = [
- {file = "pydantic-1.10.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1243d28e9b05003a89d72e7915fdb26ffd1d39bdd39b00b7dbe4afae4b557f9d"},
- {file = "pydantic-1.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0ab53b609c11dfc0c060d94335993cc2b95b2150e25583bec37a49b2d6c6c3f"},
- {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9613fadad06b4f3bc5db2653ce2f22e0de84a7c6c293909b48f6ed37b83c61f"},
- {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df7800cb1984d8f6e249351139667a8c50a379009271ee6236138a22a0c0f319"},
- {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0c6fafa0965b539d7aab0a673a046466d23b86e4b0e8019d25fd53f4df62c277"},
- {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e82d4566fcd527eae8b244fa952d99f2ca3172b7e97add0b43e2d97ee77f81ab"},
- {file = "pydantic-1.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:ab523c31e22943713d80d8d342d23b6f6ac4b792a1e54064a8d0cf78fd64e800"},
- {file = "pydantic-1.10.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:666bdf6066bf6dbc107b30d034615d2627e2121506c555f73f90b54a463d1f33"},
- {file = "pydantic-1.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:35db5301b82e8661fa9c505c800d0990bc14e9f36f98932bb1d248c0ac5cada5"},
- {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90c1e29f447557e9e26afb1c4dbf8768a10cc676e3781b6a577841ade126b85"},
- {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93e766b4a8226e0708ef243e843105bf124e21331694367f95f4e3b4a92bbb3f"},
- {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88f195f582851e8db960b4a94c3e3ad25692c1c1539e2552f3df7a9e972ef60e"},
- {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:34d327c81e68a1ecb52fe9c8d50c8a9b3e90d3c8ad991bfc8f953fb477d42fb4"},
- {file = "pydantic-1.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:d532bf00f381bd6bc62cabc7d1372096b75a33bc197a312b03f5838b4fb84edd"},
- {file = "pydantic-1.10.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7d5b8641c24886d764a74ec541d2fc2c7fb19f6da2a4001e6d580ba4a38f7878"},
- {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b1f6cb446470b7ddf86c2e57cd119a24959af2b01e552f60705910663af09a4"},
- {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c33b60054b2136aef8cf190cd4c52a3daa20b2263917c49adad20eaf381e823b"},
- {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1952526ba40b220b912cdc43c1c32bcf4a58e3f192fa313ee665916b26befb68"},
- {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bb14388ec45a7a0dc429e87def6396f9e73c8c77818c927b6a60706603d5f2ea"},
- {file = "pydantic-1.10.8-cp37-cp37m-win_amd64.whl", hash = "sha256:16f8c3e33af1e9bb16c7a91fc7d5fa9fe27298e9f299cff6cb744d89d573d62c"},
- {file = "pydantic-1.10.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ced8375969673929809d7f36ad322934c35de4af3b5e5b09ec967c21f9f7887"},
- {file = "pydantic-1.10.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93e6bcfccbd831894a6a434b0aeb1947f9e70b7468f274154d03d71fabb1d7c6"},
- {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:191ba419b605f897ede9892f6c56fb182f40a15d309ef0142212200a10af4c18"},
- {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:052d8654cb65174d6f9490cc9b9a200083a82cf5c3c5d3985db765757eb3b375"},
- {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ceb6a23bf1ba4b837d0cfe378329ad3f351b5897c8d4914ce95b85fba96da5a1"},
- {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f2e754d5566f050954727c77f094e01793bcb5725b663bf628fa6743a5a9108"},
- {file = "pydantic-1.10.8-cp38-cp38-win_amd64.whl", hash = "sha256:6a82d6cda82258efca32b40040228ecf43a548671cb174a1e81477195ed3ed56"},
- {file = "pydantic-1.10.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e59417ba8a17265e632af99cc5f35ec309de5980c440c255ab1ca3ae96a3e0e"},
- {file = "pydantic-1.10.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84d80219c3f8d4cad44575e18404099c76851bc924ce5ab1c4c8bb5e2a2227d0"},
- {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e4148e635994d57d834be1182a44bdb07dd867fa3c2d1b37002000646cc5459"},
- {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12f7b0bf8553e310e530e9f3a2f5734c68699f42218bf3568ef49cd9b0e44df4"},
- {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42aa0c4b5c3025483240a25b09f3c09a189481ddda2ea3a831a9d25f444e03c1"},
- {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17aef11cc1b997f9d574b91909fed40761e13fac438d72b81f902226a69dac01"},
- {file = "pydantic-1.10.8-cp39-cp39-win_amd64.whl", hash = "sha256:66a703d1983c675a6e0fed8953b0971c44dba48a929a2000a493c3772eb61a5a"},
- {file = "pydantic-1.10.8-py3-none-any.whl", hash = "sha256:7456eb22ed9aaa24ff3e7b4757da20d9e5ce2a81018c1b3ebd81a0b88a18f3b2"},
- {file = "pydantic-1.10.8.tar.gz", hash = "sha256:1410275520dfa70effadf4c21811d755e7ef9bb1f1d077a21958153a92c8d9ca"},
+ {file = "pydantic-1.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ff44c5e89315b15ff1f7fdaf9853770b810936d6b01a7bcecaa227d2f8fe444f"},
+ {file = "pydantic-1.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c098d4ab5e2d5b3984d3cb2527e2d6099d3de85630c8934efcfdc348a9760e"},
+ {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16928fdc9cb273c6af00d9d5045434c39afba5f42325fb990add2c241402d151"},
+ {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0588788a9a85f3e5e9ebca14211a496409cb3deca5b6971ff37c556d581854e7"},
+ {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9baf78b31da2dc3d3f346ef18e58ec5f12f5aaa17ac517e2ffd026a92a87588"},
+ {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:373c0840f5c2b5b1ccadd9286782852b901055998136287828731868027a724f"},
+ {file = "pydantic-1.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:c3339a46bbe6013ef7bdd2844679bfe500347ac5742cd4019a88312aa58a9847"},
+ {file = "pydantic-1.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:08a6c32e1c3809fbc49debb96bf833164f3438b3696abf0fbeceb417d123e6eb"},
+ {file = "pydantic-1.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a451ccab49971af043ec4e0d207cbc8cbe53dbf148ef9f19599024076fe9c25b"},
+ {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b02d24f7b2b365fed586ed73582c20f353a4c50e4be9ba2c57ab96f8091ddae"},
+ {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f34739a89260dfa420aa3cbd069fbcc794b25bbe5c0a214f8fb29e363484b66"},
+ {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e297897eb4bebde985f72a46a7552a7556a3dd11e7f76acda0c1093e3dbcf216"},
+ {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d185819a7a059550ecb85d5134e7d40f2565f3dd94cfd870132c5f91a89cf58c"},
+ {file = "pydantic-1.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:4400015f15c9b464c9db2d5d951b6a780102cfa5870f2c036d37c23b56f7fc1b"},
+ {file = "pydantic-1.10.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2417de68290434461a266271fc57274a138510dca19982336639484c73a07af6"},
+ {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:331c031ba1554b974c98679bd0780d89670d6fd6f53f5d70b10bdc9addee1713"},
+ {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8268a735a14c308923e8958363e3a3404f6834bb98c11f5ab43251a4e410170c"},
+ {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:44e51ba599c3ef227e168424e220cd3e544288c57829520dc90ea9cb190c3248"},
+ {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d7781f1d13b19700b7949c5a639c764a077cbbdd4322ed505b449d3ca8edcb36"},
+ {file = "pydantic-1.10.11-cp37-cp37m-win_amd64.whl", hash = "sha256:7522a7666157aa22b812ce14c827574ddccc94f361237ca6ea8bb0d5c38f1629"},
+ {file = "pydantic-1.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc64eab9b19cd794a380179ac0e6752335e9555d214cfcb755820333c0784cb3"},
+ {file = "pydantic-1.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8dc77064471780262b6a68fe67e013298d130414d5aaf9b562c33987dbd2cf4f"},
+ {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe429898f2c9dd209bd0632a606bddc06f8bce081bbd03d1c775a45886e2c1cb"},
+ {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:192c608ad002a748e4a0bed2ddbcd98f9b56df50a7c24d9a931a8c5dd053bd3d"},
+ {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ef55392ec4bb5721f4ded1096241e4b7151ba6d50a50a80a2526c854f42e6a2f"},
+ {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e0bb6efe86281623abbeeb0be64eab740c865388ee934cd3e6a358784aca6e"},
+ {file = "pydantic-1.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:265a60da42f9f27e0b1014eab8acd3e53bd0bad5c5b4884e98a55f8f596b2c19"},
+ {file = "pydantic-1.10.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:469adf96c8e2c2bbfa655fc7735a2a82f4c543d9fee97bd113a7fb509bf5e622"},
+ {file = "pydantic-1.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e6cbfbd010b14c8a905a7b10f9fe090068d1744d46f9e0c021db28daeb8b6de1"},
+ {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abade85268cc92dff86d6effcd917893130f0ff516f3d637f50dadc22ae93999"},
+ {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9738b0f2e6c70f44ee0de53f2089d6002b10c33264abee07bdb5c7f03038303"},
+ {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:787cf23e5a0cde753f2eabac1b2e73ae3844eb873fd1f5bdbff3048d8dbb7604"},
+ {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:174899023337b9fc685ac8adaa7b047050616136ccd30e9070627c1aaab53a13"},
+ {file = "pydantic-1.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:1954f8778489a04b245a1e7b8b22a9d3ea8ef49337285693cf6959e4b757535e"},
+ {file = "pydantic-1.10.11-py3-none-any.whl", hash = "sha256:008c5e266c8aada206d0627a011504e14268a62091450210eda7c07fabe6963e"},
+ {file = "pydantic-1.10.11.tar.gz", hash = "sha256:f66d479cf7eb331372c470614be6511eae96f1f120344c25f3f9bb59fb1b5528"},
]
[package.dependencies]
@@ -3622,20 +3968,6 @@ files = [
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
]
-[[package]]
-name = "querystring-parser"
-version = "1.2.4"
-description = "QueryString parser for Python/Django that correctly handles nested dictionaries"
-optional = true
-python-versions = "*"
-files = [
- {file = "querystring_parser-1.2.4-py2.py3-none-any.whl", hash = "sha256:d2fa90765eaf0de96c8b087872991a10238e89ba015ae59fedfed6bd61c242a0"},
- {file = "querystring_parser-1.2.4.tar.gz", hash = "sha256:644fce1cffe0530453b43a83a38094dbe422ccba8c9b2f2a1c00280e14ca8a62"},
-]
-
-[package.dependencies]
-six = "*"
-
[[package]]
name = "regex"
version = "2023.6.3"
@@ -3788,6 +4120,20 @@ nltk = "*"
numpy = "*"
six = ">=1.14.0"
+[[package]]
+name = "rsa"
+version = "4.9"
+description = "Pure-Python RSA implementation"
+optional = true
+python-versions = ">=3.6,<4"
+files = [
+ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"},
+ {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"},
+]
+
+[package.dependencies]
+pyasn1 = ">=0.1.3"
+
[[package]]
name = "s3transfer"
version = "0.10.1"
@@ -4427,6 +4773,43 @@ files = [
[package.dependencies]
mpmath = ">=0.19"
+[[package]]
+name = "tables"
+version = "3.8.0"
+description = "Hierarchical datasets for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "tables-3.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:01e82e40f9845f71de137b4472210909e35c440bbcd0858bdd2871715daef4c7"},
+ {file = "tables-3.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db185d855afd45a7259ddd0b53e5f2f8993bb134b370002c6c19532f27ce92ac"},
+ {file = "tables-3.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70a3585a268beee6d0e71bfc9abec98da84d168182f350a2ffa1ae5e42798c18"},
+ {file = "tables-3.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:117cf0f73ee2a5cba5c2b04e4aca375779aec66045aa63128e043dc608f2023b"},
+ {file = "tables-3.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2861cd3ef9eb95eead7530e4de49fd130954871e7e6d2e288012797cb9d7c2e8"},
+ {file = "tables-3.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e9bdbfbe025b6c751976382123c5f5cbd8fab6956aed776b0e8c889669e90d3"},
+ {file = "tables-3.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0821007048f2af8c1a21eb3d832072046c5df366e39587a7c7e4afad14e73fc"},
+ {file = "tables-3.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:b9370c2a4dc0051aad6b71de4f1f9b0b8b60d30b662df5c742434f2b5c6a005e"},
+ {file = "tables-3.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e19686fad4e8f5a91c3dc1eb4b7ea928838e86fefa474c63c5787a125ea79fc7"},
+ {file = "tables-3.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:239f15fa9881c257b5c0d9fb4cb8832778af1c5c8c1db6f6722466f8f26541e2"},
+ {file = "tables-3.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c83a74cac3c0629a0e83570d465f88843ef3609ef56a8ef9a49ee85ab3b8f02f"},
+ {file = "tables-3.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:a5ccb80651c5fad6ac744e2a756b28cfac78eab3b8503f4a2320ee6653b3bee9"},
+ {file = "tables-3.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3375bfafc6cf305d13617a572ab3fffc51fae2fbe0f6efce9407a41f79970b62"},
+ {file = "tables-3.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:254a4d5c2009c7ebe4293b02b8d91ea60837bff85a3c0a40cd075b8f12b1e6c3"},
+ {file = "tables-3.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da3c96456c473fb977cf6dbca9e889710ac020df1fa5b9ebb7f676e83996337d"},
+ {file = "tables-3.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:72da9404094ef8277bf62fce8873e8dc141cee9a8763ec8e7080b2d0de206094"},
+ {file = "tables-3.8.0.tar.gz", hash = "sha256:34f3fa2366ce20b18f1df573a77c1d27306ce1f2a41d9f9eff621b5192ea8788"},
+]
+
+[package.dependencies]
+blosc2 = ">=2.0.0,<2.1.0"
+cython = ">=0.29.21"
+numexpr = ">=2.6.2"
+numpy = ">=1.19.0"
+packaging = "*"
+py-cpuinfo = "*"
+
+[package.extras]
+doc = ["ipython", "numpydoc", "sphinx (>=1.1)", "sphinx-rtd-theme"]
+
[[package]]
name = "tabulate"
version = "0.9.0"
@@ -4612,130 +4995,120 @@ blobfile = ["blobfile (>=2)"]
[[package]]
name = "tokenizers"
-version = "0.15.2"
+version = "0.19.1"
description = ""
optional = false
python-versions = ">=3.7"
files = [
- {file = "tokenizers-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:52f6130c9cbf70544287575a985bf44ae1bda2da7e8c24e97716080593638012"},
- {file = "tokenizers-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:054c1cc9c6d68f7ffa4e810b3d5131e0ba511b6e4be34157aa08ee54c2f8d9ee"},
- {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9b9b070fdad06e347563b88c278995735292ded1132f8657084989a4c84a6d5"},
- {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea621a7eef4b70e1f7a4e84dd989ae3f0eeb50fc8690254eacc08acb623e82f1"},
- {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cf7fd9a5141634fa3aa8d6b7be362e6ae1b4cda60da81388fa533e0b552c98fd"},
- {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44f2a832cd0825295f7179eaf173381dc45230f9227ec4b44378322d900447c9"},
- {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8b9ec69247a23747669ec4b0ca10f8e3dfb3545d550258129bd62291aabe8605"},
- {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b6a4c78da863ff26dbd5ad9a8ecc33d8a8d97b535172601cf00aee9d7ce9ce"},
- {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5ab2a4d21dcf76af60e05af8063138849eb1d6553a0d059f6534357bce8ba364"},
- {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a47acfac7e511f6bbfcf2d3fb8c26979c780a91e06fb5b9a43831b2c0153d024"},
- {file = "tokenizers-0.15.2-cp310-none-win32.whl", hash = "sha256:064ff87bb6acdbd693666de9a4b692add41308a2c0ec0770d6385737117215f2"},
- {file = "tokenizers-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3b919afe4df7eb6ac7cafd2bd14fb507d3f408db7a68c43117f579c984a73843"},
- {file = "tokenizers-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:89cd1cb93e4b12ff39bb2d626ad77e35209de9309a71e4d3d4672667b4b256e7"},
- {file = "tokenizers-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cfed5c64e5be23d7ee0f0e98081a25c2a46b0b77ce99a4f0605b1ec43dd481fa"},
- {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a907d76dcfda37023ba203ab4ceeb21bc5683436ebefbd895a0841fd52f6f6f2"},
- {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20ea60479de6fc7b8ae756b4b097572372d7e4032e2521c1bbf3d90c90a99ff0"},
- {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48e2b9335be2bc0171df9281385c2ed06a15f5cf121c44094338306ab7b33f2c"},
- {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:112a1dd436d2cc06e6ffdc0b06d55ac019a35a63afd26475205cb4b1bf0bfbff"},
- {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4620cca5c2817177ee8706f860364cc3a8845bc1e291aaf661fb899e5d1c45b0"},
- {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccd73a82751c523b3fc31ff8194702e4af4db21dc20e55b30ecc2079c5d43cb7"},
- {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:107089f135b4ae7817affe6264f8c7a5c5b4fd9a90f9439ed495f54fcea56fb4"},
- {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0ff110ecc57b7aa4a594396525a3451ad70988e517237fe91c540997c4e50e29"},
- {file = "tokenizers-0.15.2-cp311-none-win32.whl", hash = "sha256:6d76f00f5c32da36c61f41c58346a4fa7f0a61be02f4301fd30ad59834977cc3"},
- {file = "tokenizers-0.15.2-cp311-none-win_amd64.whl", hash = "sha256:cc90102ed17271cf0a1262babe5939e0134b3890345d11a19c3145184b706055"},
- {file = "tokenizers-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f86593c18d2e6248e72fb91c77d413a815153b8ea4e31f7cd443bdf28e467670"},
- {file = "tokenizers-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0774bccc6608eca23eb9d620196687c8b2360624619623cf4ba9dc9bd53e8b51"},
- {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d0222c5b7c9b26c0b4822a82f6a7011de0a9d3060e1da176f66274b70f846b98"},
- {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3835738be1de66624fff2f4f6f6684775da4e9c00bde053be7564cbf3545cc66"},
- {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0143e7d9dcd811855c1ce1ab9bf5d96d29bf5e528fd6c7824d0465741e8c10fd"},
- {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db35825f6d54215f6b6009a7ff3eedee0848c99a6271c870d2826fbbedf31a38"},
- {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f5e64b0389a2be47091d8cc53c87859783b837ea1a06edd9d8e04004df55a5c"},
- {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e0480c452217edd35eca56fafe2029fb4d368b7c0475f8dfa3c5c9c400a7456"},
- {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a33ab881c8fe70474980577e033d0bc9a27b7ab8272896e500708b212995d834"},
- {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a308a607ca9de2c64c1b9ba79ec9a403969715a1b8ba5f998a676826f1a7039d"},
- {file = "tokenizers-0.15.2-cp312-none-win32.whl", hash = "sha256:b8fcfa81bcb9447df582c5bc96a031e6df4da2a774b8080d4f02c0c16b42be0b"},
- {file = "tokenizers-0.15.2-cp312-none-win_amd64.whl", hash = "sha256:38d7ab43c6825abfc0b661d95f39c7f8af2449364f01d331f3b51c94dcff7221"},
- {file = "tokenizers-0.15.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:38bfb0204ff3246ca4d5e726e8cc8403bfc931090151e6eede54d0e0cf162ef0"},
- {file = "tokenizers-0.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c861d35e8286a53e06e9e28d030b5a05bcbf5ac9d7229e561e53c352a85b1fc"},
- {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:936bf3842db5b2048eaa53dade907b1160f318e7c90c74bfab86f1e47720bdd6"},
- {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:620beacc3373277700d0e27718aa8b25f7b383eb8001fba94ee00aeea1459d89"},
- {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2735ecbbf37e52db4ea970e539fd2d450d213517b77745114f92867f3fc246eb"},
- {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:473c83c5e2359bb81b0b6fde870b41b2764fcdd36d997485e07e72cc3a62264a"},
- {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968fa1fb3c27398b28a4eca1cbd1e19355c4d3a6007f7398d48826bbe3a0f728"},
- {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:865c60ae6eaebdde7da66191ee9b7db52e542ed8ee9d2c653b6d190a9351b980"},
- {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7c0d8b52664ab2d4a8d6686eb5effc68b78608a9008f086a122a7b2996befbab"},
- {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f33dfbdec3784093a9aebb3680d1f91336c56d86cc70ddf88708251da1fe9064"},
- {file = "tokenizers-0.15.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d44ba80988ff9424e33e0a49445072ac7029d8c0e1601ad25a0ca5f41ed0c1d6"},
- {file = "tokenizers-0.15.2-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:dce74266919b892f82b1b86025a613956ea0ea62a4843d4c4237be2c5498ed3a"},
- {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0ef06b9707baeb98b316577acb04f4852239d856b93e9ec3a299622f6084e4be"},
- {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73e2e74bbb07910da0d37c326869f34113137b23eadad3fc00856e6b3d9930c"},
- {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eeb12daf02a59e29f578a865f55d87cd103ce62bd8a3a5874f8fdeaa82e336b"},
- {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ba9f6895af58487ca4f54e8a664a322f16c26bbb442effd01087eba391a719e"},
- {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccec77aa7150e38eec6878a493bf8c263ff1fa8a62404e16c6203c64c1f16a26"},
- {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f40604f5042ff210ba82743dda2b6aa3e55aa12df4e9f2378ee01a17e2855e"},
- {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5645938a42d78c4885086767c70923abad047163d809c16da75d6b290cb30bbe"},
- {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05a77cbfebe28a61ab5c3891f9939cc24798b63fa236d84e5f29f3a85a200c00"},
- {file = "tokenizers-0.15.2-cp37-none-win32.whl", hash = "sha256:361abdc068e8afe9c5b818769a48624687fb6aaed49636ee39bec4e95e1a215b"},
- {file = "tokenizers-0.15.2-cp37-none-win_amd64.whl", hash = "sha256:7ef789f83eb0f9baeb4d09a86cd639c0a5518528f9992f38b28e819df397eb06"},
- {file = "tokenizers-0.15.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4fe1f74a902bee74a3b25aff180fbfbf4f8b444ab37c4d496af7afd13a784ed2"},
- {file = "tokenizers-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c4b89038a684f40a6b15d6b09f49650ac64d951ad0f2a3ea9169687bbf2a8ba"},
- {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d05a1b06f986d41aed5f2de464c003004b2df8aaf66f2b7628254bcbfb72a438"},
- {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:508711a108684111ec8af89d3a9e9e08755247eda27d0ba5e3c50e9da1600f6d"},
- {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:daa348f02d15160cb35439098ac96e3a53bacf35885072611cd9e5be7d333daa"},
- {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:494fdbe5932d3416de2a85fc2470b797e6f3226c12845cadf054dd906afd0442"},
- {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2d60f5246f4da9373f75ff18d64c69cbf60c3bca597290cea01059c336d2470"},
- {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93268e788825f52de4c7bdcb6ebc1fcd4a5442c02e730faa9b6b08f23ead0e24"},
- {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6fc7083ab404019fc9acafe78662c192673c1e696bd598d16dc005bd663a5cf9"},
- {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e39b41e5531d6b2122a77532dbea60e171ef87a3820b5a3888daa847df4153"},
- {file = "tokenizers-0.15.2-cp38-none-win32.whl", hash = "sha256:06cd0487b1cbfabefb2cc52fbd6b1f8d4c37799bd6c6e1641281adaa6b2504a7"},
- {file = "tokenizers-0.15.2-cp38-none-win_amd64.whl", hash = "sha256:5179c271aa5de9c71712e31cb5a79e436ecd0d7532a408fa42a8dbfa4bc23fd9"},
- {file = "tokenizers-0.15.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82f8652a74cc107052328b87ea8b34291c0f55b96d8fb261b3880216a9f9e48e"},
- {file = "tokenizers-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:02458bee6f5f3139f1ebbb6d042b283af712c0981f5bc50edf771d6b762d5e4f"},
- {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c9a09cd26cca2e1c349f91aa665309ddb48d71636370749414fbf67bc83c5343"},
- {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:158be8ea8554e5ed69acc1ce3fbb23a06060bd4bbb09029431ad6b9a466a7121"},
- {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ddba9a2b0c8c81633eca0bb2e1aa5b3a15362b1277f1ae64176d0f6eba78ab1"},
- {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ef5dd1d39797044642dbe53eb2bc56435308432e9c7907728da74c69ee2adca"},
- {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:454c203164e07a860dbeb3b1f4a733be52b0edbb4dd2e5bd75023ffa8b49403a"},
- {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cf6b7f1d4dc59af960e6ffdc4faffe6460bbfa8dce27a58bf75755ffdb2526d"},
- {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2ef09bbc16519f6c25d0c7fc0c6a33a6f62923e263c9d7cca4e58b8c61572afb"},
- {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c9a2ebdd2ad4ec7a68e7615086e633857c85e2f18025bd05d2a4399e6c5f7169"},
- {file = "tokenizers-0.15.2-cp39-none-win32.whl", hash = "sha256:918fbb0eab96fe08e72a8c2b5461e9cce95585d82a58688e7f01c2bd546c79d0"},
- {file = "tokenizers-0.15.2-cp39-none-win_amd64.whl", hash = "sha256:524e60da0135e106b254bd71f0659be9f89d83f006ea9093ce4d1fab498c6d0d"},
- {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9b648a58281c4672212fab04e60648fde574877d0139cd4b4f93fe28ca8944"},
- {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7c7d18b733be6bbca8a55084027f7be428c947ddf871c500ee603e375013ffba"},
- {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:13ca3611de8d9ddfbc4dc39ef54ab1d2d4aaa114ac8727dfdc6a6ec4be017378"},
- {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237d1bf3361cf2e6463e6c140628e6406766e8b27274f5fcc62c747ae3c6f094"},
- {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a0fe1e49e60c664915e9fb6b0cb19bac082ab1f309188230e4b2920230edb3"},
- {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4e022fe65e99230b8fd89ebdfea138c24421f91c1a4f4781a8f5016fd5cdfb4d"},
- {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d857be2df69763362ac699f8b251a8cd3fac9d21893de129bc788f8baaef2693"},
- {file = "tokenizers-0.15.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:708bb3e4283177236309e698da5fcd0879ce8fd37457d7c266d16b550bcbbd18"},
- {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c35e09e9899b72a76e762f9854e8750213f67567787d45f37ce06daf57ca78"},
- {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1257f4394be0d3b00de8c9e840ca5601d0a4a8438361ce9c2b05c7d25f6057b"},
- {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02272fe48280e0293a04245ca5d919b2c94a48b408b55e858feae9618138aeda"},
- {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dc3ad9ebc76eabe8b1d7c04d38be884b8f9d60c0cdc09b0aa4e3bcf746de0388"},
- {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:32e16bdeffa7c4f46bf2152172ca511808b952701d13e7c18833c0b73cb5c23f"},
- {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fb16ba563d59003028b678d2361a27f7e4ae0ab29c7a80690efa20d829c81fdb"},
- {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2277c36d2d6cdb7876c274547921a42425b6810d38354327dd65a8009acf870c"},
- {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cf75d32e8d250781940d07f7eece253f2fe9ecdb1dc7ba6e3833fa17b82fcbc"},
- {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b3b31884dc8e9b21508bb76da80ebf7308fdb947a17affce815665d5c4d028"},
- {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10122d8d8e30afb43bb1fe21a3619f62c3e2574bff2699cf8af8b0b6c5dc4a3"},
- {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d88b96ff0fe8e91f6ef01ba50b0d71db5017fa4e3b1d99681cec89a85faf7bf7"},
- {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:37aaec5a52e959892870a7c47cef80c53797c0db9149d458460f4f31e2fb250e"},
- {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2ea752f2b0fe96eb6e2f3adbbf4d72aaa1272079b0dfa1145507bd6a5d537e6"},
- {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b19a808d8799fda23504a5cd31d2f58e6f52f140380082b352f877017d6342b"},
- {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c86e5e068ac8b19204419ed8ca90f9d25db20578f5881e337d203b314f4104"},
- {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de19c4dc503c612847edf833c82e9f73cd79926a384af9d801dcf93f110cea4e"},
- {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea09acd2fe3324174063d61ad620dec3bcf042b495515f27f638270a7d466e8b"},
- {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cf27fd43472e07b57cf420eee1e814549203d56de00b5af8659cb99885472f1f"},
- {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7ca22bd897537a0080521445d91a58886c8c04084a6a19e6c78c586e0cfa92a5"},
- {file = "tokenizers-0.15.2.tar.gz", hash = "sha256:e6e9c6e019dd5484be5beafc775ae6c925f4c69a3487040ed09b45e13df2cb91"},
+ {file = "tokenizers-0.19.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:952078130b3d101e05ecfc7fc3640282d74ed26bcf691400f872563fca15ac97"},
+ {file = "tokenizers-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82c8b8063de6c0468f08e82c4e198763e7b97aabfe573fd4cf7b33930ca4df77"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f03727225feaf340ceeb7e00604825addef622d551cbd46b7b775ac834c1e1c4"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:453e4422efdfc9c6b6bf2eae00d5e323f263fff62b29a8c9cd526c5003f3f642"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:02e81bf089ebf0e7f4df34fa0207519f07e66d8491d963618252f2e0729e0b46"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b07c538ba956843833fee1190cf769c60dc62e1cf934ed50d77d5502194d63b1"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28cab1582e0eec38b1f38c1c1fb2e56bce5dc180acb1724574fc5f47da2a4fe"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b01afb7193d47439f091cd8f070a1ced347ad0f9144952a30a41836902fe09e"},
+ {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7fb297edec6c6841ab2e4e8f357209519188e4a59b557ea4fafcf4691d1b4c98"},
+ {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2e8a3dd055e515df7054378dc9d6fa8c8c34e1f32777fb9a01fea81496b3f9d3"},
+ {file = "tokenizers-0.19.1-cp310-none-win32.whl", hash = "sha256:7ff898780a155ea053f5d934925f3902be2ed1f4d916461e1a93019cc7250837"},
+ {file = "tokenizers-0.19.1-cp310-none-win_amd64.whl", hash = "sha256:bea6f9947e9419c2fda21ae6c32871e3d398cba549b93f4a65a2d369662d9403"},
+ {file = "tokenizers-0.19.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5c88d1481f1882c2e53e6bb06491e474e420d9ac7bdff172610c4f9ad3898059"},
+ {file = "tokenizers-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddf672ed719b4ed82b51499100f5417d7d9f6fb05a65e232249268f35de5ed14"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dadc509cc8a9fe460bd274c0e16ac4184d0958117cf026e0ea8b32b438171594"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfedf31824ca4915b511b03441784ff640378191918264268e6923da48104acc"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac11016d0a04aa6487b1513a3a36e7bee7eec0e5d30057c9c0408067345c48d2"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76951121890fea8330d3a0df9a954b3f2a37e3ec20e5b0530e9a0044ca2e11fe"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b342d2ce8fc8d00f376af068e3274e2e8649562e3bc6ae4a67784ded6b99428d"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16ff18907f4909dca9b076b9c2d899114dd6abceeb074eca0c93e2353f943aa"},
+ {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:706a37cc5332f85f26efbe2bdc9ef8a9b372b77e4645331a405073e4b3a8c1c6"},
+ {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16baac68651701364b0289979ecec728546133e8e8fe38f66fe48ad07996b88b"},
+ {file = "tokenizers-0.19.1-cp311-none-win32.whl", hash = "sha256:9ed240c56b4403e22b9584ee37d87b8bfa14865134e3e1c3fb4b2c42fafd3256"},
+ {file = "tokenizers-0.19.1-cp311-none-win_amd64.whl", hash = "sha256:ad57d59341710b94a7d9dbea13f5c1e7d76fd8d9bcd944a7a6ab0b0da6e0cc66"},
+ {file = "tokenizers-0.19.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:621d670e1b1c281a1c9698ed89451395d318802ff88d1fc1accff0867a06f153"},
+ {file = "tokenizers-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d924204a3dbe50b75630bd16f821ebda6a5f729928df30f582fb5aade90c818a"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f3fefdc0446b1a1e6d81cd4c07088ac015665d2e812f6dbba4a06267d1a2c95"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9620b78e0b2d52ef07b0d428323fb34e8ea1219c5eac98c2596311f20f1f9266"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04ce49e82d100594715ac1b2ce87d1a36e61891a91de774755f743babcd0dd52"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5c2ff13d157afe413bf7e25789879dd463e5a4abfb529a2d8f8473d8042e28f"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3174c76efd9d08f836bfccaca7cfec3f4d1c0a4cf3acbc7236ad577cc423c840"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9d5b6c0e7a1e979bec10ff960fae925e947aab95619a6fdb4c1d8ff3708ce3"},
+ {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a179856d1caee06577220ebcfa332af046d576fb73454b8f4d4b0ba8324423ea"},
+ {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:952b80dac1a6492170f8c2429bd11fcaa14377e097d12a1dbe0ef2fb2241e16c"},
+ {file = "tokenizers-0.19.1-cp312-none-win32.whl", hash = "sha256:01d62812454c188306755c94755465505836fd616f75067abcae529c35edeb57"},
+ {file = "tokenizers-0.19.1-cp312-none-win_amd64.whl", hash = "sha256:b70bfbe3a82d3e3fb2a5e9b22a39f8d1740c96c68b6ace0086b39074f08ab89a"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:bb9dfe7dae85bc6119d705a76dc068c062b8b575abe3595e3c6276480e67e3f1"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:1f0360cbea28ea99944ac089c00de7b2e3e1c58f479fb8613b6d8d511ce98267"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:71e3ec71f0e78780851fef28c2a9babe20270404c921b756d7c532d280349214"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b82931fa619dbad979c0ee8e54dd5278acc418209cc897e42fac041f5366d626"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e8ff5b90eabdcdaa19af697885f70fe0b714ce16709cf43d4952f1f85299e73a"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e742d76ad84acbdb1a8e4694f915fe59ff6edc381c97d6dfdd054954e3478ad4"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d8c5d59d7b59885eab559d5bc082b2985555a54cda04dda4c65528d90ad252ad"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b2da5c32ed869bebd990c9420df49813709e953674c0722ff471a116d97b22d"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:638e43936cc8b2cbb9f9d8dde0fe5e7e30766a3318d2342999ae27f68fdc9bd6"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:78e769eb3b2c79687d9cb0f89ef77223e8e279b75c0a968e637ca7043a84463f"},
+ {file = "tokenizers-0.19.1-cp37-none-win32.whl", hash = "sha256:72791f9bb1ca78e3ae525d4782e85272c63faaef9940d92142aa3eb79f3407a3"},
+ {file = "tokenizers-0.19.1-cp37-none-win_amd64.whl", hash = "sha256:f3bbb7a0c5fcb692950b041ae11067ac54826204318922da754f908d95619fbc"},
+ {file = "tokenizers-0.19.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:07f9295349bbbcedae8cefdbcfa7f686aa420be8aca5d4f7d1ae6016c128c0c5"},
+ {file = "tokenizers-0.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:10a707cc6c4b6b183ec5dbfc5c34f3064e18cf62b4a938cb41699e33a99e03c1"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6309271f57b397aa0aff0cbbe632ca9d70430839ca3178bf0f06f825924eca22"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ad23d37d68cf00d54af184586d79b84075ada495e7c5c0f601f051b162112dc"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:427c4f0f3df9109314d4f75b8d1f65d9477033e67ffaec4bca53293d3aca286d"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e83a31c9cf181a0a3ef0abad2b5f6b43399faf5da7e696196ddd110d332519ee"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c27b99889bd58b7e301468c0838c5ed75e60c66df0d4db80c08f43462f82e0d3"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bac0b0eb952412b0b196ca7a40e7dce4ed6f6926489313414010f2e6b9ec2adf"},
+ {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8a6298bde623725ca31c9035a04bf2ef63208d266acd2bed8c2cb7d2b7d53ce6"},
+ {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:08a44864e42fa6d7d76d7be4bec62c9982f6f6248b4aa42f7302aa01e0abfd26"},
+ {file = "tokenizers-0.19.1-cp38-none-win32.whl", hash = "sha256:1de5bc8652252d9357a666e609cb1453d4f8e160eb1fb2830ee369dd658e8975"},
+ {file = "tokenizers-0.19.1-cp38-none-win_amd64.whl", hash = "sha256:0bcce02bf1ad9882345b34d5bd25ed4949a480cf0e656bbd468f4d8986f7a3f1"},
+ {file = "tokenizers-0.19.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0b9394bd204842a2a1fd37fe29935353742be4a3460b6ccbaefa93f58a8df43d"},
+ {file = "tokenizers-0.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4692ab92f91b87769d950ca14dbb61f8a9ef36a62f94bad6c82cc84a51f76f6a"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6258c2ef6f06259f70a682491c78561d492e885adeaf9f64f5389f78aa49a051"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c85cf76561fbd01e0d9ea2d1cbe711a65400092bc52b5242b16cfd22e51f0c58"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:670b802d4d82bbbb832ddb0d41df7015b3e549714c0e77f9bed3e74d42400fbe"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85aa3ab4b03d5e99fdd31660872249df5e855334b6c333e0bc13032ff4469c4a"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cbf001afbbed111a79ca47d75941e9e5361297a87d186cbfc11ed45e30b5daba"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c89aa46c269e4e70c4d4f9d6bc644fcc39bb409cb2a81227923404dd6f5227"},
+ {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:39c1ec76ea1027438fafe16ecb0fb84795e62e9d643444c1090179e63808c69d"},
+ {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c2a0d47a89b48d7daa241e004e71fb5a50533718897a4cd6235cb846d511a478"},
+ {file = "tokenizers-0.19.1-cp39-none-win32.whl", hash = "sha256:61b7fe8886f2e104d4caf9218b157b106207e0f2a4905c9c7ac98890688aabeb"},
+ {file = "tokenizers-0.19.1-cp39-none-win_amd64.whl", hash = "sha256:f97660f6c43efd3e0bfd3f2e3e5615bf215680bad6ee3d469df6454b8c6e8256"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3b11853f17b54c2fe47742c56d8a33bf49ce31caf531e87ac0d7d13d327c9334"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d26194ef6c13302f446d39972aaa36a1dda6450bc8949f5eb4c27f51191375bd"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e8d1ed93beda54bbd6131a2cb363a576eac746d5c26ba5b7556bc6f964425594"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca407133536f19bdec44b3da117ef0d12e43f6d4b56ac4c765f37eca501c7bda"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce05fde79d2bc2e46ac08aacbc142bead21614d937aac950be88dc79f9db9022"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:35583cd46d16f07c054efd18b5d46af4a2f070a2dd0a47914e66f3ff5efb2b1e"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b4399b59d1af5645bcee2072a463318114c39b8547437a7c2d6a186a1b5a0e2d"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6852c5b2a853b8b0ddc5993cd4f33bfffdca4fcc5d52f89dd4b8eada99379285"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcd266ae85c3d39df2f7e7d0e07f6c41a55e9a3123bb11f854412952deacd828"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecb2651956eea2aa0a2d099434134b1b68f1c31f9a5084d6d53f08ed43d45ff2"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b279ab506ec4445166ac476fb4d3cc383accde1ea152998509a94d82547c8e2a"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:89183e55fb86e61d848ff83753f64cded119f5d6e1f553d14ffee3700d0a4a49"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2edbc75744235eea94d595a8b70fe279dd42f3296f76d5a86dde1d46e35f574"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:0e64bfde9a723274e9a71630c3e9494ed7b4c0f76a1faacf7fe294cd26f7ae7c"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0b5ca92bfa717759c052e345770792d02d1f43b06f9e790ca0a1db62838816f3"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f8a20266e695ec9d7a946a019c1d5ca4eddb6613d4f466888eee04f16eedb85"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63c38f45d8f2a2ec0f3a20073cccb335b9f99f73b3c69483cd52ebc75369d8a1"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dd26e3afe8a7b61422df3176e06664503d3f5973b94f45d5c45987e1cb711876"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:eddd5783a4a6309ce23432353cdb36220e25cbb779bfa9122320666508b44b88"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:56ae39d4036b753994476a1b935584071093b55c7a72e3b8288e68c313ca26e7"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f9939ca7e58c2758c01b40324a59c034ce0cebad18e0d4563a9b1beab3018243"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c330c0eb815d212893c67a032e9dc1b38a803eccb32f3e8172c19cc69fbb439"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec11802450a2487cdf0e634b750a04cbdc1c4d066b97d94ce7dd2cb51ebb325b"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b718f316b596f36e1dae097a7d5b91fc5b85e90bf08b01ff139bd8953b25af"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ed69af290c2b65169f0ba9034d1dc39a5db9459b32f1dd8b5f3f32a3fcf06eab"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f8a9c828277133af13f3859d1b6bf1c3cb6e9e1637df0e45312e6b7c2e622b1f"},
+ {file = "tokenizers-0.19.1.tar.gz", hash = "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3"},
]
[package.dependencies]
-huggingface_hub = ">=0.16.4,<1.0"
+huggingface-hub = ">=0.16.4,<1.0"
[package.extras]
dev = ["tokenizers[testing]"]
-docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"]
-testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"]
+docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
+testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"]
[[package]]
name = "tomli"
@@ -4824,41 +5197,40 @@ test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"]
[[package]]
name = "transformers"
-version = "4.38.2"
+version = "4.44.2"
description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow"
optional = false
python-versions = ">=3.8.0"
files = [
- {file = "transformers-4.38.2-py3-none-any.whl", hash = "sha256:c4029cb9f01b3dd335e52f364c52d2b37c65b4c78e02e6a08b1919c5c928573e"},
- {file = "transformers-4.38.2.tar.gz", hash = "sha256:c5fc7ad682b8a50a48b2a4c05d4ea2de5567adb1bdd00053619dbe5960857dd5"},
+ {file = "transformers-4.44.2-py3-none-any.whl", hash = "sha256:1c02c65e7bfa5e52a634aff3da52138b583fc6f263c1f28d547dc144ba3d412d"},
+ {file = "transformers-4.44.2.tar.gz", hash = "sha256:36aa17cc92ee154058e426d951684a2dab48751b35b49437896f898931270826"},
]
[package.dependencies]
filelock = "*"
-huggingface-hub = ">=0.19.3,<1.0"
+huggingface-hub = ">=0.23.2,<1.0"
numpy = ">=1.17"
packaging = ">=20.0"
pyyaml = ">=5.1"
regex = "!=2019.12.17"
requests = "*"
safetensors = ">=0.4.1"
-tokenizers = ">=0.14,<0.19"
+tokenizers = ">=0.19,<0.20"
tqdm = ">=4.27"
[package.extras]
accelerate = ["accelerate (>=0.21.0)"]
agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"]
-all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision"]
+all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"]
audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
+benchmark = ["optimum-benchmark (>=0.2.0)"]
codecarbon = ["codecarbon (==1.2.0)"]
deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"]
-deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"]
-dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
-dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.14,<0.19)", "urllib3 (<2.0.0)"]
-dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
-docs = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision"]
-docs-specific = ["hf-doc-builder"]
-flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"]
+deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"]
+dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
+dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.19,<0.20)", "urllib3 (<2.0.0)"]
+dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
+flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"]
flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
ftfy = ["ftfy"]
integrations = ["optuna", "ray[tune] (>=2.7.0)", "sigopt"]
@@ -4868,25 +5240,26 @@ natten = ["natten (>=0.14.6,<0.15.0)"]
onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"]
onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"]
optuna = ["optuna"]
-quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<2.0.0)"]
+quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "ruff (==0.5.1)", "urllib3 (<2.0.0)"]
ray = ["ray[tune] (>=2.7.0)"]
retrieval = ["datasets (!=2.5.0)", "faiss-cpu"]
+ruff = ["ruff (==0.5.1)"]
sagemaker = ["sagemaker (>=2.31.0)"]
sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"]
serving = ["fastapi", "pydantic", "starlette", "uvicorn"]
sigopt = ["sigopt"]
sklearn = ["scikit-learn"]
speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
-testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "tensorboard", "timeout-decorator"]
-tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"]
-tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"]
+testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"]
+tf = ["keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"]
+tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<0.24)", "tensorflow-text (<2.16)", "tf2onnx"]
tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
-timm = ["timm"]
-tokenizers = ["tokenizers (>=0.14,<0.19)"]
+timm = ["timm (<=0.9.16)"]
+tokenizers = ["tokenizers (>=0.19,<0.20)"]
torch = ["accelerate (>=0.21.0)", "torch"]
torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"]
-torchhub = ["filelock", "huggingface-hub (>=0.19.3,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.14,<0.19)", "torch", "tqdm (>=4.27)"]
+torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.19,<0.20)", "torch", "tqdm (>=4.27)"]
video = ["av (==9.2.0)", "decord (==0.6.0)"]
vision = ["Pillow (>=10.0.1,<=15.0)"]
@@ -5380,4 +5753,4 @@ transformers = ["accelerate", "datasets", "torch", "transformers"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
-content-hash = "477b1347105836e413565aa36f398e96038fba0de7daa8f6123e7d03e5fe4907"
+content-hash = "7c8dc3eabf8a4d28f97b9be0f2a9fb70261baef10e3d2ef996fe56a906c36a45"
diff --git a/pyproject.toml b/pyproject.toml
index 11af70c0d..489d8a4e7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langtest"
-version = "2.3.1"
+version = "2.4.0"
description = "John Snow Labs provides a library for delivering safe & effective NLP models."
authors = ["John Snow Labs "]
readme = "README.md"
@@ -51,11 +51,11 @@ exclude = 'langtest/errors.py'
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
-pydantic = "1.10.8"
+pydantic = "1.10.11"
johnsnowlabs = { version = "4.3.5", optional = true }
rouge-score = { version = "^0.1.2", optional = true }
evaluate = { version = "^0.4.0", optional = true }
-transformers = "^4.38.2"
+transformers = "^4.44.2"
huggingface_hub = { version = ">0.16.0", optional = true}
spacy = { version = ">=3.0.0", optional = true }
nest-asyncio = "^1.5.0"
@@ -68,9 +68,9 @@ tqdm = "^4.65.0"
cohere = { version = "^4.10.0", optional = true}
ai21 = {version = "^1.1.0", optional = true}
metaflow = {version = ">=2.9.0", optional = true}
-accelerate = {version = "<0.21.0", optional = true}
+accelerate = {version = "0.33.0", optional = true}
seqeval = {version = "^1.2.0", optional = true}
-mlflow = {version = "^2.14.0", optional = true}
+mlflow = {version = "^2.16.2", optional = true}
datasets = {version = ">=2.14.0", optional = true}
matplotlib = {version = "^3.7.2", optional = true}
tenacity = {version = "^8.2.2", optional = true}
@@ -80,6 +80,9 @@ langchain-openai = {version = "^0.1.4", optional = true}
boto3 = {version = "^1.34.93", optional = true}
importlib-resources = "^6.4.0"
click = "^8.1.7"
+openpyxl = "^3.1.5"
+tables = "3.8.0"
+pillow = "10.0.0"
[tool.poetry.extras]
transformers = ["transformers", "torch", "accelerate", "datasets"]
@@ -112,7 +115,7 @@ lint = "pflake8 langtest/"
format = "black langtest/ tests/"
check-docstrings = "pydocstyle langtest/ --add-select=D417 --add-ignore=D100,D104,D105,D400,D415 --convention=google"
is-formatted = "black --check langtest/ tests/"
-force-cpu-torch = "python -m pip install transformers[torch]"
+force-cpu-torch = "python -m pip install torch --index-url https://download.pytorch.org/whl/cpu"
extra-lib = "python -m pip install openpyxl tables"
diff --git a/tests/test_robustness.py b/tests/test_robustness.py
index 70e6bd78f..8b332db87 100644
--- a/tests/test_robustness.py
+++ b/tests/test_robustness.py
@@ -469,7 +469,10 @@ def setUp(self) -> None:
test: list(scenarios.keys()) for test, scenarios in test_scenarios.items()
}
- self.perturbations_list = self.available_tests["robustness"]
+ self.perturbations_list = [
+ i for i in self.available_tests["robustness"] if not i.startswith("image_")
+ ]
+
self.supported_tests = self.available_test()
self.samples = {
"question-answering": [