From 8767d8d91470269c4ca1785eca9a5c70b2081b93 Mon Sep 17 00:00:00 2001
From: RosaMenchon
Date: Fri, 4 Oct 2024 13:41:19 +0200
Subject: [PATCH] 903 demo

---
 nbs/903_demo_3Dimg_binaryClass.ipynb | 754 +++++++++++++++++++++++++++
 1 file changed, 754 insertions(+)
 create mode 100644 nbs/903_demo_3Dimg_binaryClass.ipynb

diff --git a/nbs/903_demo_3Dimg_binaryClass.ipynb b/nbs/903_demo_3Dimg_binaryClass.ipynb
new file mode 100644
index 0000000..3c79ea7
--- /dev/null
+++ b/nbs/903_demo_3Dimg_binaryClass.ipynb
@@ -0,0 +1,754 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Demo: 3D image binary classification\n",
+    "\n",
+    "> SynapseMNIST3D dataset demo\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#| default_exp demo"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from bioMONAI.data import *\n",
+    "from bioMONAI.transforms import *\n",
+    "from bioMONAI.core import *\n",
+    "from bioMONAI.core import Path\n",
+    "from bioMONAI.data import get_target, RandomSplitter\n",
+    "#from bioMONAI.nets import BasicUNet, DynUNet\n",
+    "from bioMONAI.losses import *\n",
+    "from bioMONAI.metrics import *\n",
+    "from bioMONAI.datasets import *\n",
+    "from bioMONAI.visualize import *\n",
+    "\n",
+    "import medmnist\n",
+    "import os\n",
+    "\n",
+    "#from monai.utils import set_determinism\n",
+    "from monai.transforms import ScaleIntensity\n",
+    "\n",
+    "#set_determinism(0)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "cuda\n"
+     ]
+    }
+   ],
+   "source": [
+    "device = get_device()\n",
+    "print(device)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Download and store the dataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Using downloaded and verified file: ../_data/medmnist_data/synapsemnist3d.npz\n",
+      "Using downloaded and verified file: ../_data/medmnist_data/synapsemnist3d.npz\n",
+      "Using downloaded and verified file: ../_data/medmnist_data/synapsemnist3d.npz\n",
+      "Dataset SynapseMNIST3D of size 28 (synapsemnist3d)\n",
+      "    Number of datapoints: 1230\n",
+      "    Root location: ../_data/medmnist_data\n",
+      "    Split: train\n",
+      "    Task: binary-class\n",
+      "    Number of channels: 1\n",
+      "    Meaning of labels: {'0': 'inhibitory synapse', '1': 'excitatory synapse'}\n",
+      "    Number of samples: {'train': 1230, 'val': 177, 'test': 352}\n",
+      "    Description: The SynapseMNIST3D is a new 3D volume dataset to classify whether a synapse is excitatory or inhibitory. It uses a 3D image volume of an adult rat acquired by a multi-beam scanning electron microscope. The original data is of the size 100×100×100um^3 and the resolution 8×8×30nm^3, where a (30um)^3 sub-volume was used in the MitoEM dataset with dense 3D mitochondria instance segmentation labels. Three neuroscience experts segment a pyramidal neuron within the whole volume and proofread all the synapses on this neuron with excitatory/inhibitory labels. For each labeled synaptic location, we crop a 3D volume of 1024×1024×1024nm^3 and resize it into 28×28×28 voxels.
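+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before going further, it can help to check the class balance of the training split. The optional cell below is a small addition to the demo; it only assumes that the dataset object exposes its labels as a NumPy array via `.labels`, as used later in this notebook."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "# Count samples per class (0: inhibitory synapse, 1: excitatory synapse)\n",
+    "labels, counts = np.unique(train_data.labels, return_counts=True)\n",
+    "dict(zip(labels.tolist(), counts.tolist()))"
+   ]
+  },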
Finally, the dataset is randomly split with a ratio of 7:1:2 into training, validation and test set.\n",
+      "    License: CC BY 4.0\n",
+      "===================\n",
+      "Dataset SynapseMNIST3D of size 28 (synapsemnist3d)\n",
+      "    Number of datapoints: 177\n",
+      "    Root location: ../_data/medmnist_data\n",
+      "    Split: val\n",
+      "    Task: binary-class\n",
+      "    Number of channels: 1\n",
+      "    Meaning of labels: {'0': 'inhibitory synapse', '1': 'excitatory synapse'}\n",
+      "    Number of samples: {'train': 1230, 'val': 177, 'test': 352}\n",
+      "    Description: The SynapseMNIST3D is a new 3D volume dataset to classify whether a synapse is excitatory or inhibitory. It uses a 3D image volume of an adult rat acquired by a multi-beam scanning electron microscope. The original data is of the size 100×100×100um^3 and the resolution 8×8×30nm^3, where a (30um)^3 sub-volume was used in the MitoEM dataset with dense 3D mitochondria instance segmentation labels. Three neuroscience experts segment a pyramidal neuron within the whole volume and proofread all the synapses on this neuron with excitatory/inhibitory labels. For each labeled synaptic location, we crop a 3D volume of 1024×1024×1024nm^3 and resize it into 28×28×28 voxels. Finally, the dataset is randomly split with a ratio of 7:1:2 into training, validation and test set.\n",
+      "    License: CC BY 4.0\n"
+     ]
+    }
+   ],
+   "source": [
+    "data_flag = 'synapsemnist3d'\n",
+    "data_path = Path('../_data/medmnist_data/')\n",
+    "\n",
+    "# Ensure the save directory exists; create it if not\n",
+    "os.makedirs(data_path, exist_ok=True)\n",
+    "\n",
+    "train_data, val_data, test_data = download_medmnist(data_flag, [], data_path)\n",
+    "\n",
+    "# print the training and validation datasets\n",
+    "print(train_data)\n",
+    "print(\"===================\")\n",
+    "print(val_data)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Saving train set to ../_data/medmnist_data/train/synapsemnist3d, csv_path=../_data/medmnist_data/train/synapsemnist3d.csv...\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "100%|██████████| 1230/1230 [00:32<00:00, 37.49it/s]\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Saving val set to ../_data/medmnist_data/val/synapsemnist3d, csv_path=../_data/medmnist_data/val/synapsemnist3d.csv...\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "100%|██████████| 177/177 [00:04<00:00, 39.19it/s]\n"
+     ]
+    }
+   ],
+   "source": [
+    "train_path = data_path/'train'\n",
+    "val_path = data_path/'val'\n",
+    "\n",
+    "train_data.save(train_path)\n",
+    "val_data.save(val_path)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from fastai.vision.all import CategoryBlock, get_image_files, GrandparentSplitter, parent_label\n",
+    "\n",
+    "\n",
+    "batch_size = 128\n",
+    "\n",
+    "data_ops = {\n",
+    "    'blocks': (BioImageBlock(cls=BioImageBase), CategoryBlock),\n",
+    "    'get_items': get_image_files,\n",
+    "    'splitter': GrandparentSplitter(train_name=train_path/'synapsemnist3d', valid_name=val_path/'synapsemnist3d'),\n",
+    "    # NOTE: parent_label yields the folder name ('synapsemnist3d') for every item here;\n",
+    "    # the class is encoded in the filename suffix (e.g. train628_1.gif), so a\n",
+    "    # filename-based labeller may be more appropriate for a real run.\n",
+    "    'get_y': parent_label,\n",
+    "    'item_tfms': [ScaleIntensity()],\n",
+    "    'bs': batch_size,\n",
+    "}"
+   ]
+  },
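+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As an optional sanity check (not part of the original workflow), we can confirm the saved images are on disk and peek at a single volume with `BioImageBase.create`, both already imported above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Confirm the saved training images are on disk (1230 expected) and inspect one volume\n",
+    "files = get_image_files(train_path/'synapsemnist3d')\n",
+    "print(len(files))\n",
+    "print(BioImageBase.create(files[0]).shape)"
+   ]
+  },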
"output_type": "stream", + "text": [ + "Setting affine, but the applied meta contains an affine. This will be overwritten.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Setting-up type transforms pipelines\n", + "Collecting items from ../_data/medmnist_data/train/synapsemnist3d\n", + "Found 1230 items\n", + "2 datasets of sizes 984,246\n", + "Setting up Pipeline: BioImageBase.create\n", + "Setting up Pipeline: parent_label -> Categorize -- {'vocab': None, 'sort': True, 'add_na': False}\n", + "\n", + "Building one sample\n", + " Pipeline: BioImageBase.create\n", + " starting from\n", + " ../_data/medmnist_data/train/synapsemnist3d/train628_1.gif\n", + " applying BioImageBase.create gives\n", + " BioImageBase of size 28x28x28x3\n", + " Pipeline: parent_label -> Categorize -- {'vocab': None, 'sort': True, 'add_na': False}\n", + " starting from\n", + " ../_data/medmnist_data/train/synapsemnist3d/train628_1.gif\n", + " applying parent_label gives\n", + " synapsemnist3d\n", + " applying Categorize -- {'vocab': None, 'sort': True, 'add_na': False} gives\n", + " TensorCategory(0)\n", + "\n", + "Final sample: (BioImageBase([[[[141., 141., 141.],\n", + " [ 7., 7., 7.],\n", + " [ 37., 37., 37.],\n", + " ...,\n", + " [ 95., 95., 95.],\n", + " [197., 197., 197.],\n", + " [157., 157., 157.]],\n", + "\n", + " [[ 13., 13., 13.],\n", + " [ 5., 5., 5.],\n", + " [ 52., 52., 52.],\n", + " ...,\n", + " [130., 130., 130.],\n", + " [ 44., 44., 44.],\n", + " [184., 184., 184.]],\n", + "\n", + " [[152., 152., 152.],\n", + " [162., 162., 162.],\n", + " [206., 206., 206.],\n", + " ...,\n", + " [ 47., 47., 47.],\n", + " [ 28., 28., 28.],\n", + " [ 20., 20., 20.]],\n", + "\n", + " ...,\n", + "\n", + " [[ 87., 87., 87.],\n", + " [ 86., 86., 86.],\n", + " [ 57., 57., 57.],\n", + " ...,\n", + " [147., 147., 147.],\n", + " [185., 185., 185.],\n", + " [136., 136., 136.]],\n", + "\n", + " [[ 83., 83., 83.],\n", + " [109., 109., 109.],\n", + " [ 99., 99., 99.],\n", + " ...,\n", + " [ 54., 54., 54.],\n", + " [ 37., 37., 37.],\n", + " [ 48., 48., 48.]],\n", + "\n", + " [[215., 215., 215.],\n", + " [152., 152., 152.],\n", + " [180., 180., 180.],\n", + " ...,\n", + " [ 66., 66., 66.],\n", + " [115., 115., 115.],\n", + " [ 24., 24., 24.]]],\n", + "\n", + "\n", + " [[[118., 118., 118.],\n", + " [ 12., 12., 12.],\n", + " [ 37., 37., 37.],\n", + " ...,\n", + " [ 86., 86., 86.],\n", + " [197., 197., 197.],\n", + " [156., 156., 156.]],\n", + "\n", + " [[ 13., 13., 13.],\n", + " [ 14., 14., 14.],\n", + " [ 62., 62., 62.],\n", + " ...,\n", + " [140., 140., 140.],\n", + " [ 47., 47., 47.],\n", + " [182., 182., 182.]],\n", + "\n", + " [[144., 144., 144.],\n", + " [151., 151., 151.],\n", + " [202., 202., 202.],\n", + " ...,\n", + " [ 41., 41., 41.],\n", + " [ 40., 40., 40.],\n", + " [ 45., 45., 45.]],\n", + "\n", + " ...,\n", + "\n", + " [[103., 103., 103.],\n", + " [101., 101., 101.],\n", + " [ 77., 77., 77.],\n", + " ...,\n", + " [139., 139., 139.],\n", + " [176., 176., 176.],\n", + " [139., 139., 139.]],\n", + "\n", + " [[104., 104., 104.],\n", + " [103., 103., 103.],\n", + " [114., 114., 114.],\n", + " ...,\n", + " [ 69., 69., 69.],\n", + " [ 56., 56., 56.],\n", + " [ 71., 71., 71.]],\n", + "\n", + " [[221., 221., 221.],\n", + " [132., 132., 132.],\n", + " [163., 163., 163.],\n", + " ...,\n", + " [ 96., 96., 96.],\n", + " [132., 132., 132.],\n", + " [ 65., 65., 65.]]],\n", + "\n", + "\n", + " [[[ 40., 40., 40.],\n", + " [ 44., 44., 44.],\n", + " [ 36., 36., 36.],\n", + " ...,\n", + " [ 40., 40., 
40.],\n", + " [183., 183., 183.],\n", + " [178., 178., 178.]],\n", + "\n", + " [[ 17., 17., 17.],\n", + " [ 64., 64., 64.],\n", + " [114., 114., 114.],\n", + " ...,\n", + " [181., 181., 181.],\n", + " [ 73., 73., 73.],\n", + " [127., 127., 127.]],\n", + "\n", + " [[123., 123., 123.],\n", + " [124., 124., 124.],\n", + " [206., 206., 206.],\n", + " ...,\n", + " [ 52., 52., 52.],\n", + " [ 86., 86., 86.],\n", + " [159., 159., 159.]],\n", + "\n", + " ...,\n", + "\n", + " [[201., 201., 201.],\n", + " [200., 200., 200.],\n", + " [218., 218., 218.],\n", + " ...,\n", + " [116., 116., 116.],\n", + " [149., 149., 149.],\n", + " [133., 133., 133.]],\n", + "\n", + " [[177., 177., 177.],\n", + " [137., 137., 137.],\n", + " [194., 194., 194.],\n", + " ...,\n", + " [ 81., 81., 81.],\n", + " [ 81., 81., 81.],\n", + " [116., 116., 116.]],\n", + "\n", + " [[144., 144., 144.],\n", + " [ 25., 25., 25.],\n", + " [ 50., 50., 50.],\n", + " ...,\n", + " [226., 226., 226.],\n", + " [208., 208., 208.],\n", + " [207., 207., 207.]]],\n", + "\n", + "\n", + " ...,\n", + "\n", + "\n", + " [[[ 80., 80., 80.],\n", + " [162., 162., 162.],\n", + " [216., 216., 216.],\n", + " ...,\n", + " [129., 129., 129.],\n", + " [127., 127., 127.],\n", + " [ 90., 90., 90.]],\n", + "\n", + " [[143., 143., 143.],\n", + " [106., 106., 106.],\n", + " [ 93., 93., 93.],\n", + " ...,\n", + " [182., 182., 182.],\n", + " [157., 157., 157.],\n", + " [124., 124., 124.]],\n", + "\n", + " [[147., 147., 147.],\n", + " [201., 201., 201.],\n", + " [ 90., 90., 90.],\n", + " ...,\n", + " [101., 101., 101.],\n", + " [ 97., 97., 97.],\n", + " [ 29., 29., 29.]],\n", + "\n", + " ...,\n", + "\n", + " [[199., 199., 199.],\n", + " [128., 128., 128.],\n", + " [ 96., 96., 96.],\n", + " ...,\n", + " [118., 118., 118.],\n", + " [145., 145., 145.],\n", + " [ 98., 98., 98.]],\n", + "\n", + " [[212., 212., 212.],\n", + " [165., 165., 165.],\n", + " [ 69., 69., 69.],\n", + " ...,\n", + " [ 85., 85., 85.],\n", + " [107., 107., 107.],\n", + " [ 58., 58., 58.]],\n", + "\n", + " [[194., 194., 194.],\n", + " [117., 117., 117.],\n", + " [ 95., 95., 95.],\n", + " ...,\n", + " [204., 204., 204.],\n", + " [182., 182., 182.],\n", + " [ 61., 61., 61.]]],\n", + "\n", + "\n", + " [[[ 2., 2., 2.],\n", + " [ 93., 93., 93.],\n", + " [161., 161., 161.],\n", + " ...,\n", + " [ 18., 18., 18.],\n", + " [ 35., 35., 35.],\n", + " [ 19., 19., 19.]],\n", + "\n", + " [[112., 112., 112.],\n", + " [110., 110., 110.],\n", + " [ 48., 48., 48.],\n", + " ...,\n", + " [152., 152., 152.],\n", + " [196., 196., 196.],\n", + " [169., 169., 169.]],\n", + "\n", + " [[146., 146., 146.],\n", + " [156., 156., 156.],\n", + " [ 41., 41., 41.],\n", + " ...,\n", + " [ 34., 34., 34.],\n", + " [ 81., 81., 81.],\n", + " [114., 114., 114.]],\n", + "\n", + " ...,\n", + "\n", + " [[ 64., 64., 64.],\n", + " [ 63., 63., 63.],\n", + " [153., 153., 153.],\n", + " ...,\n", + " [ 47., 47., 47.],\n", + " [107., 107., 107.],\n", + " [ 23., 23., 23.]],\n", + "\n", + " [[ 64., 64., 64.],\n", + " [143., 143., 143.],\n", + " [206., 206., 206.],\n", + " ...,\n", + " [119., 119., 119.],\n", + " [ 20., 20., 20.],\n", + " [ 67., 67., 67.]],\n", + "\n", + " [[ 63., 63., 63.],\n", + " [150., 150., 150.],\n", + " [131., 131., 131.],\n", + " ...,\n", + " [ 57., 57., 57.],\n", + " [219., 219., 219.],\n", + " [193., 193., 193.]]],\n", + "\n", + "\n", + " [[[ 39., 39., 39.],\n", + " [195., 195., 195.],\n", + " [201., 201., 201.],\n", + " ...,\n", + " [133., 133., 133.],\n", + " [ 67., 67., 67.],\n", + " [ 39., 39., 39.]],\n", + "\n", + " [[ 
90., 90., 90.],\n", + " [108., 108., 108.],\n", + " [ 69., 69., 69.],\n", + " ...,\n", + " [188., 188., 188.],\n", + " [ 99., 99., 99.],\n", + " [ 65., 65., 65.]],\n", + "\n", + " [[220., 220., 220.],\n", + " [102., 102., 102.],\n", + " [ 49., 49., 49.],\n", + " ...,\n", + " [ 20., 20., 20.],\n", + " [131., 131., 131.],\n", + " [101., 101., 101.]],\n", + "\n", + " ...,\n", + "\n", + " [[116., 116., 116.],\n", + " [ 93., 93., 93.],\n", + " [ 48., 48., 48.],\n", + " ...,\n", + " [ 63., 63., 63.],\n", + " [ 80., 80., 80.],\n", + " [ 81., 81., 81.]],\n", + "\n", + " [[ 35., 35., 35.],\n", + " [ 34., 34., 34.],\n", + " [ 16., 16., 16.],\n", + " ...,\n", + " [139., 139., 139.],\n", + " [172., 172., 172.],\n", + " [174., 174., 174.]],\n", + "\n", + " [[ 53., 53., 53.],\n", + " [ 99., 99., 99.],\n", + " [ 42., 42., 42.],\n", + " ...,\n", + " [128., 128., 128.],\n", + " [ 84., 84., 84.],\n", + " [ 56., 56., 56.]]]]), TensorCategory(0))\n", + "\n", + "\n", + "Collecting items from ../_data/medmnist_data/train/synapsemnist3d\n", + "Found 1230 items\n", + "2 datasets of sizes 984,246\n", + "Setting up Pipeline: BioImageBase.create\n", + "Setting up Pipeline: parent_label -> Categorize -- {'vocab': None, 'sort': True, 'add_na': False}\n", + "Setting up after_item: Pipeline: ScaleIntensity -> ToTensor\n", + "Setting up before_batch: Pipeline: \n", + "Setting up after_batch: Pipeline: Tensor2BioImage -- {}\n", + "\n", + "Building one batch\n", + "Applying item_tfms to the first sample:\n", + " Pipeline: ScaleIntensity -> ToTensor\n", + " starting from\n", + " (BioImageBase of size 28x28x28x3, TensorCategory(0))\n", + " applying ScaleIntensity gives\n", + " (MetaTensor of size 28x28x28x3, metatensor(0.))\n", + " applying ToTensor gives\n", + " (MetaTensor of size 28x28x28x3, metatensor(0.))\n", + "\n", + "Adding the next 3 samples\n", + "\n", + "No before_batch transform to apply\n", + "\n", + "Collating items in a batch\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Setting affine, but the applied meta contains an affine. This will be overwritten.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Applying batch_tfms to the batch built\n", + " Pipeline: Tensor2BioImage -- {}\n", + " starting from\n", + " (MetaTensor of size 4x28x28x28x3, metatensor([0., 0., 0., 0.], device='cuda:0'))\n", + " applying Tensor2BioImage -- {} gives\n", + " (BioImageBase of size 4x28x28x28x3, metatensor([0., 0., 0., 0.], device='cuda:0'))\n", + "None\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Setting affine, but the applied meta contains an affine. 
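+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before moving on, we can optionally pull one batch to confirm its geometry. This check is an addition to the demo and relies on fastai's standard `one_batch` method."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Grab a single batch and inspect image/label shapes\n",
+    "xb, yb = data.one_batch()\n",
+    "print(xb.shape, yb.shape)"
+   ]
+  },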
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "train_x = BioImageStack(train_data.imgs)\n",
+    "print(train_x.shape)\n",
+    "\n",
+    "train_y = train_data.labels\n",
+    "print(train_y.shape)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# visualization\n",
+    "mosaic_image_3d(train_x[0], cmap='gray')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from monai.data import DataLoader\n",
+    "\n",
+    "# Wrap the raw medmnist training set in a MONAI DataLoader\n",
+    "# (the fastai-style dataloaders below handle the train/valid split)\n",
+    "train_loader = DataLoader(train_data, batch_size=128, shuffle=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "batch_size = 128\n",
+    "\n",
+    "data_ops = {\n",
+    "    'blocks': (BioImageBlock(cls=BioImageStack), CategoryBlock),\n",
+    "    #'get_items': get_image_files,\n",
+    "    'splitter': RandomSplitter(valid_pct=0.2),\n",
+    "    'item_tfms': [ScaleIntensity(), RandRot90(prob=0.5), RandFlip(prob=0.75)],\n",
+    "    'bs': batch_size,\n",
+    "}\n",
+    "\n",
+    "data = get_dataloader(\n",
+    "    train_data,\n",
+    "    show_summary=True,\n",
+    "    **data_ops,\n",
+    ")\n",
+    "\n",
+    "# print length of training and validation datasets\n",
+    "#print('train images:', len(data.train_ds.items), '\\nvalidation images:', len(data.valid_ds.items))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "data.show_batch(max_n=2, cmap='hot')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load and train a 3D model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "from monai.networks.nets import DenseNet121\n",
+    "from torchmetrics.classification import BinaryAccuracy\n",
+    "\n",
+    "# Two output logits (inhibitory/excitatory) to match CrossEntropyLoss\n",
+    "net = DenseNet121(spatial_dims=3, in_channels=1, out_channels=2)\n",
+    "\n",
+    "loss = torch.nn.CrossEntropyLoss()\n",
+    "metric = BinaryAccuracy()\n",
+    "\n",
+    "# Pass the DataLoaders built above (not the raw MONAI loader) to the trainer\n",
+    "trainer = fastTrainer(data, net, loss_fn=loss, metrics=metric, show_summary=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "trainer.fit_flat_cos(500)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "trainer.show_results(cmap='gray')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# trainer.save('tmp-model')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Test data\n",
+    "Evaluate the performance of the selected model on unseen data.\n",
+    "It's important not to touch this data until you have fine-tuned your model, so that the evaluation is unbiased!"
+   ]
+  },
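+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The cell below is a minimal sketch of this evaluation step (it is not part of the original demo). It assumes `test_data.imgs` and `test_data.labels` mirror the `train_data.imgs`/`train_data.labels` arrays used above, and that the trained network takes a single-channel `(N, 1, 28, 28, 28)` float input scaled to `[0, 1]`. Adapt it to your final data pipeline before relying on the numbers."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "\n",
+    "# Hedged sketch: score the trained network on the raw test volumes in small batches\n",
+    "net = net.to(device)\n",
+    "net.eval()\n",
+    "correct, total = 0, 0\n",
+    "with torch.no_grad():\n",
+    "    for i in range(0, len(test_data.imgs), 32):\n",
+    "        # (b, 28, 28, 28) uint8 -> (b, 1, 28, 28, 28) float in [0, 1]\n",
+    "        x = torch.as_tensor(test_data.imgs[i:i+32], dtype=torch.float32, device=device).unsqueeze(1) / 255.0\n",
+    "        y = torch.as_tensor(test_data.labels[i:i+32], device=device).squeeze(-1)\n",
+    "        preds = net(x).argmax(dim=1)\n",
+    "        correct += (preds == y).sum().item()\n",
+    "        total += y.numel()\n",
+    "print(f'test accuracy: {correct / total:.3f}')"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "biomonai",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}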