diff --git a/docs/TutorialNotebookExample.ipynb b/docs/00_Sig53DatasetTutorial.ipynb similarity index 77% rename from docs/TutorialNotebookExample.ipynb rename to docs/00_Sig53DatasetTutorial.ipynb index 4c3e87f..fe2ac4c 100644 --- a/docs/TutorialNotebookExample.ipynb +++ b/docs/00_Sig53DatasetTutorial.ipynb @@ -7,13 +7,11 @@ "tags": [] }, "source": [ - "## Tutorial Notebook Example\n", + "## Sig53 Dataset\n", "\n", - "last updated: 2023-07-23\n", + "last updated: 2023-07-29\n", "\n", - "This is a test notebook to show how we can include examples and tutorials as non-executed jupyter notebooks that get built during our documentation building on readthedocs. This way, we can still include code and code output to show the capabilities of TorchSig.\n", - "\n", - "This is just a template for how to do this and should be replaced :)" + "TODO: This notebook is just a placeholder for now." ] }, { @@ -21,9 +19,9 @@ "id": "d2d7597b-7f82-4493-b3b1-d4ef7b9c24b8", "metadata": {}, "source": [ - "### Generate Complex Sinusoid Example Data\n", + "### Modulation Family Background\n", "\n", - "TODO: Explain steps in tutorial in more detail" + "TODO: Describe and plot the modulation families present in Sig53. Should resemble Figure 3 from [Large Scale Radio Frequency Signal Classification](https://arxiv.org/pdf/2207.09918.pdf)." ] }, { @@ -33,6 +31,7 @@ "metadata": {}, "outputs": [], "source": [ + "#TODO: Replace this code block\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "\n", @@ -53,9 +52,9 @@ "id": "2e3151e0-813b-4c07-bc86-72c72f92f6ee", "metadata": {}, "source": [ - "### Apply TorchSig Transform\n", + "### Instantiate the Sig53 Dataset\n", "\n", - "TODO: Explain steps in tutorial in more detail" + "TODO: Explain details of the Sig53 dataset and show code on how to instantiate the datasets." ] }, { @@ -65,6 +64,8 @@ "metadata": {}, "outputs": [], "source": [ + "#TODO: Replace this code block\n", + "\n", "from torchsig.transforms import AddNoise\n", "\n", "t = AddNoise(noise_power_db=-20)\n", diff --git a/docs/01_WidebandSig53DatasetTutorial.ipynb b/docs/01_WidebandSig53DatasetTutorial.ipynb new file mode 100644 index 0000000..e35633c --- /dev/null +++ b/docs/01_WidebandSig53DatasetTutorial.ipynb @@ -0,0 +1,114 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## WidebandSig53 Dataset\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." + ] + }, + { + "cell_type": "markdown", + "id": "d2d7597b-7f82-4493-b3b1-d4ef7b9c24b8", + "metadata": {}, + "source": [ + "### Dataset Background\n", + "\n", + "TODO: Describe and plot a few examples from WidebandSig53 (via the `WidebandModulations` class such that we are not asking sphinx to generate the full dataset). Should resemble Figure 1 from [Large Scale Wideband Radio Frequency Signal Detection & Recognition](https://arxiv.org/pdf/2211.10335.pdf)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7888b4a8-0b66-4d29-bf40-6bce2b83a288", + "metadata": {}, + "outputs": [], + "source": [ + "#TODO: Replace this code block\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "num_samples = 4096\n", + "x = np.exp(2j * np.pi * 2.0 / num_samples * np.arange(num_samples))\n", + "\n", + "plt.figure()\n", + "plt.plot(x.real, c='b')\n", + "plt.plot(x.imag, c='r')\n", + "plt.title('Test plot')\n", + "plt.xlabel('samples')\n", + "plt.ylabel('amplitude')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "2e3151e0-813b-4c07-bc86-72c72f92f6ee", + "metadata": {}, + "source": [ + "### Instantiate the WidebandSig53 Dataset\n", + "\n", + "TODO: Explain details of the WidebandSig53 dataset and show code on how to instantiate the datasets." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "462b7078-f064-4a2b-ba18-8c8b320e35ef", + "metadata": {}, + "outputs": [], + "source": [ + "#TODO: Replace this code block\n", + "\n", + "from torchsig.transforms import AddNoise\n", + "\n", + "t = AddNoise(noise_power_db=-20)\n", + "\n", + "y = t(x)\n", + "\n", + "plt.figure()\n", + "plt.plot(y.real, c='b')\n", + "plt.plot(y.imag, c='r')\n", + "plt.title('Test plot')\n", + "plt.xlabel('samples')\n", + "plt.ylabel('amplitude')\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/02_RadioMLDatasetTutorial.ipynb b/docs/02_RadioMLDatasetTutorial.ipynb new file mode 100644 index 0000000..fcafba5 --- /dev/null +++ b/docs/02_RadioMLDatasetTutorial.ipynb @@ -0,0 +1,80 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## RadioML Dataset\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." 
+ ] + }, + { + "cell_type": "markdown", + "id": "d2d7597b-7f82-4493-b3b1-d4ef7b9c24b8", + "metadata": {}, + "source": [ + "### Dataset Background\n", + "\n", + "TODO: Describe and plot a few examples from RadioML" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7888b4a8-0b66-4d29-bf40-6bce2b83a288", + "metadata": {}, + "outputs": [], + "source": [ + "#TODO: Replace this code block\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "num_samples = 4096\n", + "x = np.exp(2j * np.pi * 2.0 / num_samples * np.arange(num_samples))\n", + "\n", + "plt.figure()\n", + "plt.plot(x.real, c='b')\n", + "plt.plot(x.imag, c='r')\n", + "plt.title('Test plot')\n", + "plt.xlabel('samples')\n", + "plt.ylabel('amplitude')\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/03_DataTransformTutorial.ipynb b/docs/03_DataTransformTutorial.ipynb new file mode 100644 index 0000000..5b194c6 --- /dev/null +++ b/docs/03_DataTransformTutorial.ipynb @@ -0,0 +1,114 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Data Transforms\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." + ] + }, + { + "cell_type": "markdown", + "id": "d2d7597b-7f82-4493-b3b1-d4ef7b9c24b8", + "metadata": {}, + "source": [ + "### Data Transform Background\n", + "\n", + "TODO: Define data transforms to be static or feature extraction methods. List transforms included." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7888b4a8-0b66-4d29-bf40-6bce2b83a288", + "metadata": {}, + "outputs": [], + "source": [ + "#TODO: Replace this code block\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "num_samples = 4096\n", + "x = np.exp(2j * np.pi * 2.0 / num_samples * np.arange(num_samples))\n", + "\n", + "plt.figure()\n", + "plt.plot(x.real, c='b')\n", + "plt.plot(x.imag, c='r')\n", + "plt.title('Test plot')\n", + "plt.xlabel('samples')\n", + "plt.ylabel('amplitude')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "2e3151e0-813b-4c07-bc86-72c72f92f6ee", + "metadata": {}, + "source": [ + "### Feature Transforms\n", + "\n", + "TODO: Walk through each feature transform with plots" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "462b7078-f064-4a2b-ba18-8c8b320e35ef", + "metadata": {}, + "outputs": [], + "source": [ + "#TODO: Replace this code block\n", + "\n", + "from torchsig.transforms import AddNoise\n", + "\n", + "t = AddNoise(noise_power_db=-20)\n", + "\n", + "y = t(x)\n", + "\n", + "plt.figure()\n", + "plt.plot(y.real, c='b')\n", + "plt.plot(y.imag, c='r')\n", + "plt.title('Test plot')\n", + "plt.xlabel('samples')\n", + "plt.ylabel('amplitude')\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/04_TargetTransformTutorial.ipynb b/docs/04_TargetTransformTutorial.ipynb new file mode 100644 index 0000000..7c46173 --- /dev/null +++ b/docs/04_TargetTransformTutorial.ipynb @@ -0,0 +1,114 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Target Transforms\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." + ] + }, + { + "cell_type": "markdown", + "id": "d2d7597b-7f82-4493-b3b1-d4ef7b9c24b8", + "metadata": {}, + "source": [ + "### Target Transform Background\n", + "\n", + "TODO: Define targets transforms as the mapping from `SignalMetadata` to a target representation expected by ML models for a particular task (classification, detection, recognition, segmentation, etc.). List transforms included." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7888b4a8-0b66-4d29-bf40-6bce2b83a288", + "metadata": {}, + "outputs": [], + "source": [ + "#TODO: Replace this code block\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "num_samples = 4096\n", + "x = np.exp(2j * np.pi * 2.0 / num_samples * np.arange(num_samples))\n", + "\n", + "plt.figure()\n", + "plt.plot(x.real, c='b')\n", + "plt.plot(x.imag, c='r')\n", + "plt.title('Test plot')\n", + "plt.xlabel('samples')\n", + "plt.ylabel('amplitude')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "2e3151e0-813b-4c07-bc86-72c72f92f6ee", + "metadata": {}, + "source": [ + "### Target Transforms\n", + "\n", + "TODO: Walk through each target transform with plots" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "462b7078-f064-4a2b-ba18-8c8b320e35ef", + "metadata": {}, + "outputs": [], + "source": [ + "#TODO: Replace this code block\n", + "\n", + "from torchsig.transforms import AddNoise\n", + "\n", + "t = AddNoise(noise_power_db=-20)\n", + "\n", + "y = t(x)\n", + "\n", + "plt.figure()\n", + "plt.plot(y.real, c='b')\n", + "plt.plot(y.imag, c='r')\n", + "plt.title('Test plot')\n", + "plt.xlabel('samples')\n", + "plt.ylabel('amplitude')\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/05_DataAugmentationTutorial.ipynb b/docs/05_DataAugmentationTutorial.ipynb new file mode 100644 index 0000000..c645869 --- /dev/null +++ b/docs/05_DataAugmentationTutorial.ipynb @@ -0,0 +1,114 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Data Augmentations\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." 
+ ] + }, + { + "cell_type": "markdown", + "id": "d2d7597b-7f82-4493-b3b1-d4ef7b9c24b8", + "metadata": {}, + "source": [ + "### Data Augmentation Background\n", + "\n", + "TODO: Define data augmentations as randomized operations to augment the datasets during training" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7888b4a8-0b66-4d29-bf40-6bce2b83a288", + "metadata": {}, + "outputs": [], + "source": [ + "#TODO: Replace this code block\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "num_samples = 4096\n", + "x = np.exp(2j * np.pi * 2.0 / num_samples * np.arange(num_samples))\n", + "\n", + "plt.figure()\n", + "plt.plot(x.real, c='b')\n", + "plt.plot(x.imag, c='r')\n", + "plt.title('Test plot')\n", + "plt.xlabel('samples')\n", + "plt.ylabel('amplitude')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "2e3151e0-813b-4c07-bc86-72c72f92f6ee", + "metadata": {}, + "source": [ + "### Data Augmentation Examples\n", + "\n", + "TODO: Walk through all data augmentation techniques with plots" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "462b7078-f064-4a2b-ba18-8c8b320e35ef", + "metadata": {}, + "outputs": [], + "source": [ + "#TODO: Replace this code block\n", + "\n", + "from torchsig.transforms import AddNoise\n", + "\n", + "t = AddNoise(noise_power_db=-20)\n", + "\n", + "y = t(x)\n", + "\n", + "plt.figure()\n", + "plt.plot(y.real, c='b')\n", + "plt.plot(y.imag, c='r')\n", + "plt.title('Test plot')\n", + "plt.xlabel('samples')\n", + "plt.ylabel('amplitude')\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/06_SignalClassificationTrainingTutorial.ipynb b/docs/06_SignalClassificationTrainingTutorial.ipynb new file mode 100644 index 0000000..fc45068 --- /dev/null +++ b/docs/06_SignalClassificationTrainingTutorial.ipynb @@ -0,0 +1,47 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Signal Classification Training Tutorial\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/07_SignalClassificationInferenceTutorial.ipynb b/docs/07_SignalClassificationInferenceTutorial.ipynb new file mode 100644 index 0000000..c68340b --- /dev/null +++ b/docs/07_SignalClassificationInferenceTutorial.ipynb @@ -0,0 +1,47 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Signal Classification Inference Tutorial\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/08_SignalClassificationFinetuningTutorial.ipynb b/docs/08_SignalClassificationFinetuningTutorial.ipynb new file mode 100644 index 0000000..cd50d2e --- /dev/null +++ b/docs/08_SignalClassificationFinetuningTutorial.ipynb @@ -0,0 +1,47 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Signal Classification Finetuning Tutorial\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/09_SignalDetectionTrainingTutorial.ipynb b/docs/09_SignalDetectionTrainingTutorial.ipynb new file mode 100644 index 0000000..e2328a2 --- /dev/null +++ b/docs/09_SignalDetectionTrainingTutorial.ipynb @@ -0,0 +1,47 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Signal Detection Training Tutorial\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/10_SignalDetectionInferenceTutorial.ipynb b/docs/10_SignalDetectionInferenceTutorial.ipynb new file mode 100644 index 0000000..bbd8ba7 --- /dev/null +++ b/docs/10_SignalDetectionInferenceTutorial.ipynb @@ -0,0 +1,47 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Signal Detection Inference Tutorial\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/11_SignalDetectionFinetuningTutorial.ipynb b/docs/11_SignalDetectionFinetuningTutorial.ipynb new file mode 100644 index 0000000..2b1ef24 --- /dev/null +++ b/docs/11_SignalDetectionFinetuningTutorial.ipynb @@ -0,0 +1,47 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Signal Detection Finetuning Tutorial\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/12_SignalRecognitionTrainingTutorial.ipynb b/docs/12_SignalRecognitionTrainingTutorial.ipynb new file mode 100644 index 0000000..7495ed8 --- /dev/null +++ b/docs/12_SignalRecognitionTrainingTutorial.ipynb @@ -0,0 +1,47 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Signal Recognition Training Tutorial\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/13_SignalRecognitionInferenceTutorial.ipynb b/docs/13_SignalRecognitionInferenceTutorial.ipynb new file mode 100644 index 0000000..b775bac --- /dev/null +++ b/docs/13_SignalRecognitionInferenceTutorial.ipynb @@ -0,0 +1,47 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Signal Recognition Inference Tutorial\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/14_SignalRecognitionFinetuningTutorial.ipynb b/docs/14_SignalRecognitionFinetuningTutorial.ipynb new file mode 100644 index 0000000..cf36dbe --- /dev/null +++ b/docs/14_SignalRecognitionFinetuningTutorial.ipynb @@ -0,0 +1,47 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ff639336-c092-4882-b08e-78f8b82f741e", + "metadata": { + "tags": [] + }, + "source": [ + "## Signal Recognition Finetuning Tutorial\n", + "\n", + "last updated: 2023-07-29\n", + "\n", + "TODO: This notebook is just a placeholder for now." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2166e37-6a60-4088-9c60-d2bb62d7834b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/index.rst b/docs/index.rst index a22676e..867688c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,16 +10,39 @@ TorchSig :width: 400 :alt: TorchSig -:mod:`TorchSig` is an open-source signals processing machine learning toolkit. +:mod:`TorchSig` is an open-source signal processing machine learning toolkit. .. toctree:: + :caption: Code API + :name: mastertoc + :maxdepth: 2 datasets transforms models utils - TutorialNotebookExample + +.. 
toctree:: + :caption: Tutorials + :name: tutorialtoc + :maxdepth: 2 + + 00_Sig53DatasetTutorial + 01_WidebandSig53DatasetTutorial + 02_RadioMLDatasetTutorial + 03_DataTransformTutorial + 04_TargetTransformTutorial + 05_DataAugmentationTutorial + 06_SignalClassificationTrainingTutorial + 07_SignalClassificationInferenceTutorial + 08_SignalClassificationFinetuningTutorial + 09_SignalDetectionTrainingTutorial + 10_SignalDetectionInferenceTutorial + 11_SignalDetectionFinetuningTutorial + 12_SignalRecognitionTrainingTutorial + 13_SignalRecognitionInferenceTutorial + 14_SignalRecognitionFinetuningTutorial .. automodule:: torchsig - :members: + :members:
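
The dataset notebooks added above stub out their main cells with the note "show code on how to instantiate the datasets." As a starting point for filling in those cells, below is a minimal sketch that instantiates the narrowband Sig53 and wideband WidebandSig53 datasets and wraps one of them in a PyTorch DataLoader. The import paths and the root/train/impaired keyword arguments are assumptions about the TorchSig dataset API and should be verified against the installed version; the local dataset paths are hypothetical and assume the data has already been generated.

# Minimal sketch for the placeholder dataset cells. The import paths and the
# root/train/impaired keyword arguments are assumptions about the TorchSig
# dataset API and may need adjusting for the installed TorchSig version.
from torch.utils.data import DataLoader

from torchsig.datasets.sig53 import Sig53
from torchsig.datasets.wideband_sig53 import WidebandSig53

# Narrowband Sig53: clean (non-impaired) training split.
sig53 = Sig53(
    root="datasets/sig53",  # hypothetical local path to pre-generated data
    train=True,
    impaired=False,
)

# WidebandSig53: impaired training split.
wideband_sig53 = WidebandSig53(
    root="datasets/wideband_sig53",  # hypothetical local path to pre-generated data
    train=True,
    impaired=True,
)

# Each item is an (IQ data, target) pair.
iq_data, target = sig53[0]
print(iq_data.shape, target)

# The training/inference tutorial notebooks would typically build a standard
# PyTorch training loop on top of a DataLoader like this one.
train_loader = DataLoader(sig53, batch_size=16, shuffle=True)
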