diff --git a/notebooks/paddle-ocr-webcam/paddle-ocr-webcam.ipynb b/notebooks/paddle-ocr-webcam/paddle-ocr-webcam.ipynb
index 46e956a73be..2d4ac10b9a0 100644
--- a/notebooks/paddle-ocr-webcam/paddle-ocr-webcam.ipynb
+++ b/notebooks/paddle-ocr-webcam/paddle-ocr-webcam.ipynb
@@ -150,16 +150,7 @@
     }
    ],
    "source": [
-    "import ipywidgets as widgets\n",
-    "\n",
-    "core = ov.Core()\n",
-    "\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = utils.device_widget()\n",
     "\n",
     "device"
    ]
diff --git a/notebooks/paddle-to-openvino/paddle-to-openvino-classification.ipynb b/notebooks/paddle-to-openvino/paddle-to-openvino-classification.ipynb
index 6e57118d6de..e625c3e4722 100644
--- a/notebooks/paddle-to-openvino/paddle-to-openvino-classification.ipynb
+++ b/notebooks/paddle-to-openvino/paddle-to-openvino-classification.ipynb
@@ -109,7 +109,7 @@
     "\n",
     "open(\"notebook_utils.py\", \"w\").write(r.text)\n",
     "\n",
-    "from notebook_utils import download_file"
+    "from notebook_utils import download_file, device_widget"
    ]
   },
   {
@@ -426,15 +426,8 @@
     }
    ],
    "source": [
-    "import ipywidgets as widgets\n",
-    "\n",
     "core = ov.Core()\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = device_widget()\n",
     "\n",
     "device"
    ]
diff --git a/notebooks/paint-by-example/paint-by-example.ipynb b/notebooks/paint-by-example/paint-by-example.ipynb
index 23322679c72..acc10394201 100644
--- a/notebooks/paint-by-example/paint-by-example.ipynb
+++ b/notebooks/paint-by-example/paint-by-example.ipynb
@@ -130,7 +130,7 @@
     "\n",
     "open(\"notebook_utils.py\", \"w\").write(r.text)\n",
     "\n",
-    "from notebook_utils import download_file\n",
+    "from notebook_utils import download_file, device_widget, quantization_widget\n",
     "\n",
     "download_file(\n",
     "    \"https://github-production-user-asset-6210df.s3.amazonaws.com/103226580/286377210-edc98e97-0e43-4796-b771-dacd074c39ea.png\",\n",
@@ -857,17 +857,7 @@
     }
    ],
    "source": [
-    "from openvino import Core\n",
-    "import ipywidgets as widgets\n",
-    "\n",
-    "core = Core()\n",
-    "\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = device_widget()\n",
     "\n",
     "device"
    ]
@@ -896,6 +886,7 @@
    "source": [
     "ov_config = {\"INFERENCE_PRECISION_HINT\": \"f32\"} if device.value != \"CPU\" else {}\n",
     "\n",
+    "core = ov.Core()\n",
     "\n",
     "def get_ov_pipeline():\n",
     "    image_encoder_inpaint = core.compile_model(IMAGE_ENCODER_OV_PATH_INPAINT, device.value)\n",
@@ -961,17 +952,11 @@
     }
    ],
    "source": [
-    "import ipywidgets as widgets\n",
-    "\n",
     "UNET_INT8_OV_PATH = Path(\"model/unet_int8.xml\")\n",
     "int8_ov_pipe_inpaint = None\n",
     "\n",
     "\n",
-    "to_quantize = widgets.Checkbox(\n",
-    "    value=True,\n",
-    "    description=\"Quantization\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "to_quantize = quantization_widget()\n",
     "\n",
     "to_quantize"
    ]
@@ -1527,6 +1512,8 @@
     }
    ],
    "source": [
+    "import ipywidgets as widgets\n",
+    "\n",
     "available_models = [\"FP16\"]\n",
     "\n",
     "if UNET_INT8_OV_PATH.exists():\n",
diff --git a/notebooks/person-tracking-webcam/person-tracking.ipynb b/notebooks/person-tracking-webcam/person-tracking.ipynb
index 20b978ae24a..72f09450ef2 100644
--- a/notebooks/person-tracking-webcam/person-tracking.ipynb
+++ b/notebooks/person-tracking-webcam/person-tracking.ipynb
@@ -288,14 +288,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import ipywidgets as widgets\n",
-    "\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = utils.device_widget()\n",
     "\n",
     "device"
    ]
diff --git a/notebooks/pose-estimation-webcam/pose-estimation.ipynb b/notebooks/pose-estimation-webcam/pose-estimation.ipynb
index fa68cdb0263..e2ea697e63e 100644
--- a/notebooks/pose-estimation-webcam/pose-estimation.ipynb
+++ b/notebooks/pose-estimation-webcam/pose-estimation.ipynb
@@ -144,16 +144,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import ipywidgets as widgets\n",
-    "\n",
-    "core = ov.Core()\n",
-    "\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = utils.device_widget()\n",
     "\n",
     "device"
    ]
@@ -197,6 +188,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {
     "jp-MarkdownHeadingCollapsed": true
diff --git a/notebooks/pytorch-post-training-quantization-nncf/pytorch-post-training-quantization-nncf.ipynb b/notebooks/pytorch-post-training-quantization-nncf/pytorch-post-training-quantization-nncf.ipynb
index 8936277f1ab..28fbfb28b3b 100644
--- a/notebooks/pytorch-post-training-quantization-nncf/pytorch-post-training-quantization-nncf.ipynb
+++ b/notebooks/pytorch-post-training-quantization-nncf/pytorch-post-training-quantization-nncf.ipynb
@@ -114,7 +114,7 @@
     ")\n",
     "\n",
     "open(\"notebook_utils.py\", \"w\").write(r.text)\n",
-    "from notebook_utils import download_file"
+    "from notebook_utils import download_file, device_widget"
    ]
   },
   {
@@ -761,15 +761,7 @@
     }
    ],
    "source": [
-    "import ipywidgets as widgets\n",
-    "\n",
-    "core = ov.Core()\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = device_widget()\n",
     "\n",
     "device"
    ]
diff --git a/notebooks/pytorch-quantization-aware-training/pytorch-quantization-aware-training.ipynb b/notebooks/pytorch-quantization-aware-training/pytorch-quantization-aware-training.ipynb
index dd9d530eab7..59e074197a5 100644
--- a/notebooks/pytorch-quantization-aware-training/pytorch-quantization-aware-training.ipynb
+++ b/notebooks/pytorch-quantization-aware-training/pytorch-quantization-aware-training.ipynb
@@ -166,7 +166,7 @@
     ")\n",
     "\n",
     "open(\"notebook_utils.py\", \"w\").write(r.text)\n",
-    "from notebook_utils import download_file\n",
+    "from notebook_utils import download_file, device_widget\n",
     "\n",
     "torch.manual_seed(0)\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
@@ -1039,16 +1039,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import ipywidgets as widgets\n",
-    "\n",
-    "# Initialize OpenVINO runtime\n",
-    "core = ov.Core()\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices,\n",
-    "    value=\"CPU\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = device_widget()\n",
     "\n",
     "device"
    ]
@@ -1117,6 +1108,7 @@
     }
    ],
    "source": [
+    "core = ov.Core()\n",
     "core.get_property(device.value, \"FULL_DEVICE_NAME\")"
    ]
   }
diff --git a/notebooks/pytorch-quantization-sparsity-aware-training/pytorch-quantization-sparsity-aware-training.ipynb b/notebooks/pytorch-quantization-sparsity-aware-training/pytorch-quantization-sparsity-aware-training.ipynb
index 9a78a661907..3e2d64e7d4c 100644
--- a/notebooks/pytorch-quantization-sparsity-aware-training/pytorch-quantization-sparsity-aware-training.ipynb
+++ b/notebooks/pytorch-quantization-sparsity-aware-training/pytorch-quantization-sparsity-aware-training.ipynb
@@ -137,7 +137,7 @@
     "    url=\"https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py\",\n",
     ")\n",
     "open(\"notebook_utils.py\", \"w\").write(r.text)\n",
-    "from notebook_utils import download_file\n",
+    "from notebook_utils import download_file, device_widget\n",
     "\n",
     "DATA_DIR = Path(\"data\")\n",
     "\n",
@@ -658,16 +658,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import ipywidgets as widgets\n",
+    "\n",
     "\n",
     "# Initialize OpenVINO runtime\n",
     "core = ov.Core()\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices,\n",
-    "    value=\"CPU\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = device_widget()\n",
     "\n",
     "device"
    ]
diff --git a/notebooks/pytorch-to-openvino/pytorch-onnx-to-openvino.ipynb b/notebooks/pytorch-to-openvino/pytorch-onnx-to-openvino.ipynb
index c2ba18f22ad..6da1dab7a6a 100644
--- a/notebooks/pytorch-to-openvino/pytorch-onnx-to-openvino.ipynb
+++ b/notebooks/pytorch-to-openvino/pytorch-onnx-to-openvino.ipynb
@@ -119,6 +119,7 @@
     "    SegmentationMap,\n",
     "    Label,\n",
     "    download_file,\n",
+    "    device_widget\n",
     ")"
    ]
   },
@@ -470,14 +471,7 @@
     }
    ],
    "source": [
-    "import ipywidgets as widgets\n",
-    "\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = device_widget()\n",
     "\n",
     "device"
    ]
diff --git a/notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb b/notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb
index bd2634664ed..5e47b4ad4a7 100644
--- a/notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb
+++ b/notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb
@@ -129,7 +129,7 @@
     "\n",
     "open(\"notebook_utils.py\", \"w\").write(r.text)\n",
     "\n",
-    "from notebook_utils import download_file"
+    "from notebook_utils import download_file, device_widget"
    ]
   },
   {
@@ -698,16 +698,7 @@
     }
    ],
    "source": [
-    "import ipywidgets as widgets\n",
-    "\n",
-    "core = ov.Core()\n",
-    "\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = device_widget()\n",
     "\n",
     "device"
    ]
diff --git a/notebooks/segment-anything/segment-anything.ipynb b/notebooks/segment-anything/segment-anything.ipynb
index ae817d26bd1..94d91400c45 100644
--- a/notebooks/segment-anything/segment-anything.ipynb
+++ b/notebooks/segment-anything/segment-anything.ipynb
@@ -152,7 +152,7 @@
     ")\n",
     "\n",
     "open(\"notebook_utils.py\", \"w\").write(r.text)\n",
-    "from notebook_utils import download_file\n",
+    "from notebook_utils import download_file, device_widget\n",
     "\n",
     "checkpoint = \"sam_vit_b_01ec64.pth\"\n",
     "model_url = \"https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth\"\n",
@@ -243,14 +243,7 @@
     }
    ],
    "source": [
-    "import ipywidgets as widgets\n",
-    "\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = device_widget()\n",
     "\n",
     "device"
    ]
diff --git a/notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb b/notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb
index 764ddd16cad..af8550c80ea 100644
--- a/notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb
+++ b/notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb
@@ -84,7 +84,7 @@
     ")\n",
     "\n",
     "open(\"notebook_utils.py\", \"w\").write(r.text)\n",
-    "from notebook_utils import download_file\n",
+    "from notebook_utils import download_file, device_widget\n",
     "\n",
     "# ContentVec\n",
     "download_file(\n",
@@ -332,17 +332,11 @@
    },
    "outputs": [],
    "source": [
-    "import ipywidgets as widgets\n",
     "import openvino as ov\n",
     "\n",
     "core = ov.Core()\n",
     "\n",
-    "device = widgets.Dropdown(\n",
-    "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = device_widget()\n",
     "\n",
     "device"
    ]
diff --git a/notebooks/stable-diffusion-keras-cv/stable-diffusion-keras-cv.ipynb b/notebooks/stable-diffusion-keras-cv/stable-diffusion-keras-cv.ipynb
index d9200784925..a752d5eee60 100644
--- a/notebooks/stable-diffusion-keras-cv/stable-diffusion-keras-cv.ipynb
+++ b/notebooks/stable-diffusion-keras-cv/stable-diffusion-keras-cv.ipynb
@@ -117,7 +117,7 @@
     "if not Path(\"notebook_utils.py\").exists():\n",
     "    r = requests.get(url=\"https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py\")\n",
     "    open(\"notebook_utils.py\", \"w\").write(r.text)\n",
-    "from notebook_utils import download_file\n",
+    "from notebook_utils import download_file, device_widget\n",
     "\n",
     "IMAGE_WIDTH = 512\n",
     "IMAGE_HEIGHT = 512\n",
@@ -544,15 +544,7 @@
     }
    ],
    "source": [
-    "import ipywidgets as widgets\n",
-    "import openvino as ov\n",
-    "\n",
-    "device = widgets.Dropdown(\n",
-    "    options=ov.Core().available_devices + [\"AUTO\"],\n",
-    "    value=\"AUTO\",\n",
-    "    description=\"Device:\",\n",
-    "    disabled=False,\n",
-    ")\n",
+    "device = device_widget()\n",
     "\n",
     "device"
    ]
@@ -573,6 +565,8 @@
    },
    "outputs": [],
    "source": [
+    "import openvino as ov\n",
+    "\n",
     "core = ov.Core()\n",
     "ov_text_encoder = core.compile_model(OV_TEXT_ENCODER_MODEL_PATH, device.value)\n",
     "ov_diffusion_model = core.compile_model(OV_DIFFUSION_MODEL_PATH, device.value)\n",
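Note: the `device_widget()` and `quantization_widget()` helpers these notebooks now import live in `utils/notebook_utils.py`, which is outside this diff. A minimal sketch of what they presumably wrap, reconstructed from the inline widget code removed above (the real helpers may expose extra parameters), is:

# Hypothetical sketch of the notebook_utils helpers referenced in this diff;
# reconstructed from the inline ipywidgets code the notebooks previously used,
# not copied from utils/notebook_utils.py itself.
import ipywidgets as widgets
import openvino as ov


def device_widget():
    """Dropdown over the available OpenVINO devices plus "AUTO", defaulting to "AUTO"."""
    core = ov.Core()
    return widgets.Dropdown(
        options=core.available_devices + ["AUTO"],
        value="AUTO",
        description="Device:",
        disabled=False,
    )


def quantization_widget():
    """Checkbox that enables the optional quantization step by default."""
    return widgets.Checkbox(
        value=True,
        description="Quantization",
        disabled=False,
    )

One behavioral detail: the two quantization-aware-training notebooks previously listed only `core.available_devices` with a "CPU" default, so moving them to the shared helper also gives them the "AUTO" option; the paddle-ocr-webcam, person-tracking, and pose-estimation notebooks call the helper through their existing `utils` import alias.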